Dataset columns: code (string, lengths 13 to 1.2M), order_type (string, 1 distinct value), original_example (dict), step_ids (list, lengths 1 to 5).
"""Test the init file of Mailgun."""
import hashlib
import hmac

import pytest

from homeassistant import config_entries, data_entry_flow
from homeassistant.components import mailgun, webhook
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN
from homeassistant.core import callback
from homeassistant.setup import async_setup_component

API_KEY = "abc123"


@pytest.fixture
async def http_client(hass, hass_client_no_auth):
    """Initialize a Home Assistant Server for testing this module."""
    await async_setup_component(hass, webhook.DOMAIN, {})
    return await hass_client_no_auth()


@pytest.fixture
async def webhook_id_with_api_key(hass):
    """Initialize the Mailgun component and get the webhook_id."""
    await async_setup_component(
        hass,
        mailgun.DOMAIN,
        {mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: "example.com"}},
    )

    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    result = await hass.config_entries.flow.async_init(
        "mailgun", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.FlowResultType.FORM, result

    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY

    return result["result"].data["webhook_id"]


@pytest.fixture
async def webhook_id_without_api_key(hass):
    """Initialize the Mailgun component and get the webhook_id w/o API key."""
    await async_setup_component(hass, mailgun.DOMAIN, {})

    await async_process_ha_core_config(
        hass,
        {"internal_url": "http://example.local:8123"},
    )
    result = await hass.config_entries.flow.async_init(
        "mailgun", context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.FlowResultType.FORM, result

    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.FlowResultType.CREATE_ENTRY

    return result["result"].data["webhook_id"]


@pytest.fixture
async def mailgun_events(hass):
    """Return a list of mailgun_events triggered."""
    events = []

    @callback
    def handle_event(event):
        """Handle Mailgun event."""
        events.append(event)

    hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)

    return events


async def test_mailgun_webhook_with_missing_signature(
    http_client, webhook_id_with_api_key, mailgun_events
) -> None:
    """Test that webhook doesn't trigger an event without a signature."""
    event_count = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_with_api_key}",
        json={"hello": "mailgun", "signature": {}},
    )

    assert len(mailgun_events) == event_count

    await http_client.post(
        f"/api/webhook/{webhook_id_with_api_key}", json={"hello": "mailgun"}
    )

    assert len(mailgun_events) == event_count


async def test_mailgun_webhook_with_different_api_key(
    http_client, webhook_id_with_api_key, mailgun_events
) -> None:
    """Test that webhook doesn't trigger an event with a wrong signature."""
    timestamp = "1529006854"
    token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"

    event_count = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_with_api_key}",
        json={
            "hello": "mailgun",
            "signature": {
                "signature": hmac.new(
                    key=b"random_api_key",
                    msg=bytes(f"{timestamp}{token}", "utf-8"),
                    digestmod=hashlib.sha256,
                ).hexdigest(),
                "timestamp": timestamp,
                "token": token,
            },
        },
    )

    assert len(mailgun_events) == event_count


async def test_mailgun_webhook_event_with_correct_api_key(
    http_client, webhook_id_with_api_key, mailgun_events
) -> None:
    """Test that webhook triggers an event after validating a signature."""
    timestamp = "1529006854"
    token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"

    event_count = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_with_api_key}",
        json={
            "hello": "mailgun",
            "signature": {
                "signature": hmac.new(
                    key=bytes(API_KEY, "utf-8"),
                    msg=bytes(f"{timestamp}{token}", "utf-8"),
                    digestmod=hashlib.sha256,
                ).hexdigest(),
                "timestamp": timestamp,
                "token": token,
            },
        },
    )

    assert len(mailgun_events) == event_count + 1
    assert mailgun_events[-1].data["webhook_id"] == webhook_id_with_api_key
    assert mailgun_events[-1].data["hello"] == "mailgun"


async def test_mailgun_webhook_with_missing_signature_without_api_key(
    http_client, webhook_id_without_api_key, mailgun_events
) -> None:
    """Test that webhook triggers an event without a signature w/o API key."""
    event_count = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_without_api_key}",
        json={"hello": "mailgun", "signature": {}},
    )

    assert len(mailgun_events) == event_count + 1
    assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
    assert mailgun_events[-1].data["hello"] == "mailgun"

    await http_client.post(
        f"/api/webhook/{webhook_id_without_api_key}", json={"hello": "mailgun"}
    )

    assert len(mailgun_events) == event_count + 1
    assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
    assert mailgun_events[-1].data["hello"] == "mailgun"


async def test_mailgun_webhook_event_without_an_api_key(
    http_client, webhook_id_without_api_key, mailgun_events
) -> None:
    """Test that webhook triggers an event if there is no api key."""
    timestamp = "1529006854"
    token = "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"

    event_count = len(mailgun_events)

    await http_client.post(
        f"/api/webhook/{webhook_id_without_api_key}",
        json={
            "hello": "mailgun",
            "signature": {
                "signature": hmac.new(
                    key=bytes(API_KEY, "utf-8"),
                    msg=bytes(f"{timestamp}{token}", "utf-8"),
                    digestmod=hashlib.sha256,
                ).hexdigest(),
                "timestamp": timestamp,
                "token": token,
            },
        },
    )

    assert len(mailgun_events) == event_count + 1
    assert mailgun_events[-1].data["webhook_id"] == webhook_id_without_api_key
    assert mailgun_events[-1].data["hello"] == "mailgun"
normal
{ "blob_id": "a55024f0e5edec22125ce53ef54ee364be185cb8", "index": 1099, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 
'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n", "step-3": "<mask token>\nAPI_KEY = 'abc123'\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await 
hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def 
test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n", "step-4": "<mask token>\nimport hashlib\nimport hmac\nimport pytest\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.components import mailgun, webhook\nfrom homeassistant.config import async_process_ha_core_config\nfrom homeassistant.const import CONF_API_KEY, CONF_DOMAIN\nfrom homeassistant.core import callback\nfrom homeassistant.setup import async_setup_component\nAPI_KEY = 'abc123'\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {mailgun.DOMAIN: {\n CONF_API_KEY: API_KEY, CONF_DOMAIN: 'example.com'}})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n await async_process_ha_core_config(hass, {'internal_url':\n 'http://example.local:8123'})\n result = await hass.config_entries.flow.async_init('mailgun', context={\n 'source': config_entries.SOURCE_USER})\n assert result['type'] == data_entry_flow.FlowResultType.FORM, result\n result = await hass.config_entries.flow.async_configure(result[\n 'flow_id'], {})\n assert result['type'] == data_entry_flow.FlowResultType.CREATE_ENTRY\n return result['result'].data['webhook_id']\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {}})\n assert 
len(mailgun_events) == event_count\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun'})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n b'random_api_key', msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(http_client,\n webhook_id_with_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_with_api_key}', json=\n {'hello': 'mailgun', 'signature': {'signature': hmac.new(key=bytes(\n API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_with_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun'})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(http_client,\n webhook_id_without_api_key, mailgun_events) ->None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = '1529006854'\n token = 'a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0'\n event_count = len(mailgun_events)\n await http_client.post(f'/api/webhook/{webhook_id_without_api_key}',\n json={'hello': 'mailgun', 'signature': {'signature': hmac.new(key=\n bytes(API_KEY, 'utf-8'), msg=bytes(f'{timestamp}{token}', 'utf-8'),\n digestmod=hashlib.sha256).hexdigest(), 'timestamp': timestamp,\n 'token': token}})\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data['webhook_id'] == webhook_id_without_api_key\n assert mailgun_events[-1].data['hello'] == 'mailgun'\n", "step-5": "\"\"\"Test the init file of Mailgun.\"\"\"\nimport hashlib\nimport hmac\n\nimport pytest\n\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.components import mailgun, webhook\nfrom homeassistant.config import 
async_process_ha_core_config\nfrom homeassistant.const import CONF_API_KEY, CONF_DOMAIN\nfrom homeassistant.core import callback\nfrom homeassistant.setup import async_setup_component\n\nAPI_KEY = \"abc123\"\n\n\[email protected]\nasync def http_client(hass, hass_client_no_auth):\n \"\"\"Initialize a Home Assistant Server for testing this module.\"\"\"\n await async_setup_component(hass, webhook.DOMAIN, {})\n return await hass_client_no_auth()\n\n\[email protected]\nasync def webhook_id_with_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id.\"\"\"\n await async_setup_component(\n hass,\n mailgun.DOMAIN,\n {mailgun.DOMAIN: {CONF_API_KEY: API_KEY, CONF_DOMAIN: \"example.com\"}},\n )\n\n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n result = await hass.config_entries.flow.async_init(\n \"mailgun\", context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM, result\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n\n return result[\"result\"].data[\"webhook_id\"]\n\n\[email protected]\nasync def webhook_id_without_api_key(hass):\n \"\"\"Initialize the Mailgun component and get the webhook_id w/o API key.\"\"\"\n await async_setup_component(hass, mailgun.DOMAIN, {})\n\n await async_process_ha_core_config(\n hass,\n {\"internal_url\": \"http://example.local:8123\"},\n )\n result = await hass.config_entries.flow.async_init(\n \"mailgun\", context={\"source\": config_entries.SOURCE_USER}\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM, result\n\n result = await hass.config_entries.flow.async_configure(result[\"flow_id\"], {})\n assert result[\"type\"] == data_entry_flow.FlowResultType.CREATE_ENTRY\n\n return result[\"result\"].data[\"webhook_id\"]\n\n\[email protected]\nasync def mailgun_events(hass):\n \"\"\"Return a list of mailgun_events triggered.\"\"\"\n events = []\n\n @callback\n def handle_event(event):\n \"\"\"Handle Mailgun event.\"\"\"\n events.append(event)\n\n hass.bus.async_listen(mailgun.MESSAGE_RECEIVED, handle_event)\n\n return events\n\n\nasync def test_mailgun_webhook_with_missing_signature(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook doesn't trigger an event without a signature.\"\"\"\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count\n\n\nasync def test_mailgun_webhook_with_different_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook doesn't trigger an event with a wrong signature.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=b\"random_api_key\",\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == 
event_count\n\n\nasync def test_mailgun_webhook_event_with_correct_api_key(\n http_client, webhook_id_with_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event after validating a signature.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_with_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_with_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n\nasync def test_mailgun_webhook_with_missing_signature_without_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event without a signature w/o API key.\"\"\"\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\"hello\": \"mailgun\", \"signature\": {}},\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\", json={\"hello\": \"mailgun\"}\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n\n\nasync def test_mailgun_webhook_event_without_an_api_key(\n http_client, webhook_id_without_api_key, mailgun_events\n) -> None:\n \"\"\"Test that webhook triggers an event if there is no api key.\"\"\"\n timestamp = \"1529006854\"\n token = \"a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0\"\n\n event_count = len(mailgun_events)\n\n await http_client.post(\n f\"/api/webhook/{webhook_id_without_api_key}\",\n json={\n \"hello\": \"mailgun\",\n \"signature\": {\n \"signature\": hmac.new(\n key=bytes(API_KEY, \"utf-8\"),\n msg=bytes(f\"{timestamp}{token}\", \"utf-8\"),\n digestmod=hashlib.sha256,\n ).hexdigest(),\n \"timestamp\": timestamp,\n \"token\": token,\n },\n },\n )\n\n assert len(mailgun_events) == event_count + 1\n assert mailgun_events[-1].data[\"webhook_id\"] == webhook_id_without_api_key\n assert mailgun_events[-1].data[\"hello\"] == \"mailgun\"\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
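Aside: every signed payload in the tests above is built the same way — an HMAC-SHA256 digest over the concatenated timestamp and token, keyed with the account API key. A standalone sketch of that computation (the helper name is ours, not part of the test module):

import hashlib
import hmac


def mailgun_signature(api_key: str, timestamp: str, token: str) -> str:
    """Hex HMAC-SHA256 digest over timestamp + token, keyed with the API key."""
    return hmac.new(
        key=api_key.encode("utf-8"),
        msg=f"{timestamp}{token}".encode("utf-8"),
        digestmod=hashlib.sha256,
    ).hexdigest()


# Reproduces the signature sent in test_mailgun_webhook_event_with_correct_api_key.
print(mailgun_signature("abc123", "1529006854", "a8ce0edb2dd8301dee6c2405235584e45aa91d1e9f979f3de0"))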
import re
list = ["Protein XVZ [Human]","Protein ABC [Mouse]","go UDP[3] glucosamine N-acyltransferase [virus1]","Protein CDY [Chicken [type1]]","Protein BBC [type 2] [Bacteria] [cat] [mat]","gi p19-gag protein [2] [Human T-lymphotropic virus 2]"]
pattern = re.compile("\[(.*?)\]$")
for string in list:
    match = re.search(pattern,string)
    lastBracket = re.split("\].*\[",match.group(1))[-1]
    print lastBracket
normal
{ "blob_id": "21c12aabfb21e84f3ea546842fb55c41d2129ff9", "index": 6526, "step-1": "import re\nlist = [\"Protein XVZ [Human]\",\"Protein ABC [Mouse]\",\"go UDP[3] glucosamine N-acyltransferase [virus1]\",\"Protein CDY [Chicken [type1]]\",\"Protein BBC [type 2] [Bacteria] [cat] [mat]\",\"gi p19-gag protein [2] [Human T-lymphotropic virus 2]\"]\npattern = re.compile(\"\\[(.*?)\\]$\")\nfor string in list:\n match = re.search(pattern,string)\n lastBracket = re.split(\"\\].*\\[\",match.group(1))[-1]\n print lastBracket\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
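The regex snippet in the record above targets Python 2 (bare print statement). A Python 3 rendering with the same behaviour, for reference — the list is renamed to avoid shadowing the builtin, and raw strings silence the invalid-escape warnings:

import re

names = [
    "Protein XVZ [Human]",
    "Protein ABC [Mouse]",
    "go UDP[3] glucosamine N-acyltransferase [virus1]",
    "Protein CDY [Chicken [type1]]",
    "Protein BBC [type 2] [Bacteria] [cat] [mat]",
    "gi p19-gag protein [2] [Human T-lymphotropic virus 2]",
]
pattern = re.compile(r"\[(.*?)\]$")
for string in names:
    match = re.search(pattern, string)
    # Keep only the contents of the last bracketed group, e.g. "Human", "virus1", "mat".
    last_bracket = re.split(r"\].*\[", match.group(1))[-1]
    print(last_bracket)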
clear;
clc;

%----------------------- Read in the images -----------------------%
markbefore=imread('p203.bmp');
markbefore2=rgb2gray(markbefore);
mark=im2bw(markbefore2);
figure(1);
subplot(2,3,1);
imshow(mark),title('Watermark image');
[rm,cm]=size(mark);
cover=imread('pic.bmp');
cover1=imresize(cover,[512,512]);
cover_image=rgb2gray(cover1);
subplot(2,3,2),imshow(cover_image,[]),title('Original image');

before=blkproc(cover_image,[8 8],'dct2');   % Split the gray layer of the cover image into 8x8 blocks, apply a 2-D DCT to each block, and store the result in matrix 'before'
I=mark;
alpha=50;       % Scale factor: controls the embedding strength, i.e. how much the frequency-domain coefficients are modified
k1=randn(1,8);  % Generate two different random sequences
k2=randn(1,8);
after=before;   % Initialize the result matrix that will hold the watermarked coefficients
for i=1:rm      % Embed the watermark in the mid-frequency band
    for j=1:cm
        x=(i-1)*8;
        y=(j-1)*8;
        if mark(i,j)==1
            k=k1;
        else
            k=k2;
        end;
        after(x+1,y+8)=before(x+1,y+8)+alpha*k(1);
        after(x+2,y+7)=before(x+2,y+7)+alpha*k(2);
        after(x+3,y+6)=before(x+3,y+6)+alpha*k(3);
        after(x+4,y+5)=before(x+4,y+5)+alpha*k(4);
        after(x+5,y+4)=before(x+5,y+4)+alpha*k(5);
        after(x+6,y+3)=before(x+6,y+3)+alpha*k(6);
        after(x+7,y+2)=before(x+7,y+2)+alpha*k(7);
        after(x+8,y+1)=before(x+8,y+1)+alpha*k(8);
    end;
end;
result=blkproc(after,[8 8],'idct2');  % Split the processed image into 8x8 blocks and apply the inverse 2-D DCT to each block
result = uint8(result);
imwrite(result,'watermarked.bmp','bmp');  % Save the stego image as watermarked.bmp
subplot(2,3,3),imshow(result,[]),title('Stego image');

subplot(2,3,4);
imshow(result,[]);
title('Watermark image');
withmark=result;
subplot(2,3,4);
imshow(result,[]);
title('Image');
withmark=result;

%------------------------ Watermark extraction -----------------------------%
%
after_2=blkproc(withmark,[8,8],'dct2');  % Extraction starts here: apply the block-wise DCT to the gray layer
p=zeros(1,8);    % Initialize the vector used to collect the extracted values
mark_2 = zeros(rm,cm);
for i=1:rm
    for j=1:cm
        x=(i-1)*8;y=(j-1)*8;
        p(1)=after_2(x+1,y+8);  % Read back the coefficients whose values were modified during embedding
        p(2)=after_2(x+2,y+7);
        p(3)=after_2(x+3,y+6);
        p(4)=after_2(x+4,y+5);
        p(5)=after_2(x+5,y+4);
        p(6)=after_2(x+6,y+3);
        p(7)=after_2(x+7,y+2);
        p(8)=after_2(x+8,y+1);
        if corr2(p,k1)>corr2(p,k2)  % corr2 computes the similarity of two matrices; the closer to 1, the more similar
            mark_2(i,j)=1;          % Compare the extracted values with the random sequences k1 and k2 to reconstruct the watermark pattern
        else
            mark_2(i,j)=0;
        end
    end
end
subplot(2,3,5);
mark_2 = uint8(mark_2);
imshow(mark_2,[]),title('Extracted watermark');
subplot(2,3,6);
imshow(mark),title('Original watermark image');
normal
{ "blob_id": "56d3e59e3e077b1febb834668aba44ce8dba13ae", "index": 635, "step-1": "clear ;\nclc;\n \n%-----------------------读入图像-------------------------------------%\nmarkbefore=imread('p203.bmp');\nmarkbefore2=rgb2gray(markbefore);\nmark=im2bw(markbefore2); \nfigure(1); \nsubplot(2,3,1); \nimshow(mark),title('水印图像'); \n[rm,cm]=size(mark); \ncover=imread('pic.bmp');\ncover1=imresize(cover,[512,512]);\ncover_image=rgb2gray(cover1);\nsubplot(2,3,2),imshow(cover_image,[]),title('原始图像'); \n \nbefore=blkproc(cover_image,[8 8],'dct2'); %将载体图像的灰度层分为8×8的小块,每一块内做二维DCT变换,结果记入矩阵before\nI=mark;\nalpha=50; %尺度因子,控制水印添加的强度,决定了频域系数被修改的幅度\nk1=randn(1,8); %产生两个不同的随机序列\nk2=randn(1,8);\nafter=before; %初始化载入水印的结果矩阵\nfor i=1:rm %在中频段嵌入水印\n for j=1:cm\n x=(i-1)*8;\n y=(j-1)*8;\n if mark(i,j)==1\n k=k1;\n else\n k=k2;\n end;\n after(x+1,y+8)=before(x+1,y+8)+alpha*k(1);\n after(x+2,y+7)=before(x+2,y+7)+alpha*k(2);\n after(x+3,y+6)=before(x+3,y+6)+alpha*k(3);\n after(x+4,y+5)=before(x+4,y+5)+alpha*k(4);\n after(x+5,y+4)=before(x+5,y+4)+alpha*k(5);\n after(x+6,y+3)=before(x+6,y+3)+alpha*k(6);\n after(x+7,y+2)=before(x+7,y+2)+alpha*k(7);\n after(x+8,y+1)=before(x+8,y+1)+alpha*k(8);\n end;\nend;\nresult=blkproc(after,[8 8],'idct2'); %将经处理的图像分为8×8的小块,每一块内做二维DCT逆变换\nresult = uint8(result);\nimwrite(result,'watermarked.bmp','bmp'); %隐写图像命名为watermarked.bmp\nsubplot(2,3,3),imshow(result,[]),title('隐写图像'); \n\n\n subplot(2,3,4);\n imshow(result,[]);\n title('水印图像');\n withmark=result;\n subplot(2,3,4);\n imshow(result,[]);\n title('图像');\n withmark=result;\n\n \n%------------------------水印提取-----------------------------%\n%\nafter_2=blkproc(withmark,[8,8],'dct2'); %此步开始提取水印,将灰度层分块进行DCT变换\np=zeros(1,8); %初始化提取数值用的矩阵\nmark_2 = zeros(rm,cm);\nfor i=1:rm\n for j=1:cm\n x=(i-1)*8;y=(j-1)*8;\n p(1)=after_2(x+1,y+8); %将之前改变过数值的点的数值提取出来\n p(2)=after_2(x+2,y+7);\n p(3)=after_2(x+3,y+6);\n p(4)=after_2(x+4,y+5);\n p(5)=after_2(x+5,y+4);\n p(6)=after_2(x+6,y+3);\n p(7)=after_2(x+7,y+2);\n p(8)=after_2(x+8,y+1);\n if corr2(p,k1)>corr2(p,k2) %corr2计算两个矩阵的相似度,越接近1相似度越大\n mark_2(i,j)=1; %比较提取出来的数值与随机频率k1和k2的相似度,还原水印图样\n else\n mark_2(i,j)=0;\n end\n end\nend\nsubplot(2,3,5);\nmark_2 = uint8(mark_2);\nimshow(mark_2,[]),title('提取水印');\nsubplot(2,3,6);\nimshow(mark),title('原水印图像');\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
#!/usr/bin/env python3

# given a set A and n other sets.
# find whether set A is a strict superset of each of the n sets
# print True if yes, otherwise False

A = set(map(int, input().split()))
b = []
for _ in range(int(input())):
    b.append(A > set(map(int, input().split())))

print(all(b))
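For illustration (the values below are made up): A > s is Python's strict/proper-superset test, so the script prints True only when every other set is strictly contained in A.

A = {1, 2, 3, 4, 5, 6}
print(all(A > s for s in [{1, 2}, {3, 4}]))              # True: both are proper subsets of A
print(all(A > s for s in [{1, 2}, {1, 2, 3, 4, 5, 6}]))  # False: an equal set is not a strict subset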
normal
{ "blob_id": "a9eb2b3f26396918c792de3f126e51bde334b709", "index": 7777, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\nprint(all(b))\n", "step-3": "A = set(map(int, input().split()))\nb = []\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\nprint(all(b))\n", "step-4": "#!/usr/bin/env python3\n\n# given a set A and n other sets.\n# find whether set A is a strict superset of each of the n sets\n# print True if yes, otherwise False\n\nA = set(map(int, input().split()))\nb = []\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\n\nprint(all(b))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]

"""
module      : watcher.py
description : Script to automatically watch a directory (via watchdog) for tests and run them via py.test
"""
import sys
import os.path
import subprocess
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler


class SpecificationsEventHandler(FileSystemEventHandler):
    """Runs the tests inside the specifications class when any specification file is modified"""

    def __init__(self):
        self.paused = False
        self.banner = "============================================================"

    def on_modified(self, event):
        super(SpecificationsEventHandler, self).on_modified(event)
        """
        Description:
            Catches the file modified event from the watchdog package and
            creates the full path to the file for submission to the test engine
            of choice.

        Args:
            event: Contains the information for the file system event
            when modification has occurred
        """

        # file modified triggers directory modified as well...
        if event.is_directory:
            return

        if self.paused:
            return

        if event.src_path.endswith("_specs.py") and not self.paused:
            self.paused = True
            #filename = os.path.basename(event.src_path)
            directory = os.path.abspath(os.path.dirname(event.src_path))
            filename = os.path.basename(event.src_path)
            file = os.path.join(directory, filename)

            print(self.banner, end="\n")
            print("testing specifications found in file: {0}".format(file))
            print("")

            # if using pytest, uncomment the line below
            #subprocess.call(['py.test', '-v', file], shell=True)

            # using mamba as the test engine:
            subprocess.call(['mamba', file], shell=True)

            print(self.banner, end="\n")

            self.paused = False
            return


if __name__ == "__main__":
    path = sys.argv[1]
    event_handler = SpecificationsEventHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
normal
{ "blob_id": "95ea8a21d3ac44c7760179bc4ebf67f0c16e6a19", "index": 2421, "step-1": "<mask token>\n\n\nclass SpecificationsEventHandler(FileSystemEventHandler):\n <mask token>\n\n def __init__(self):\n self.paused = False\n self.banner = (\n '============================================================')\n\n def on_modified(self, event):\n super(SpecificationsEventHandler, self).on_modified(event)\n \"\"\"\n\t\t\tDescription:\n\t\t\t\tCatches the file modified event from the watchdog package and \n\t\t\t\tcreates the full path to the file for submission to the test engine \n\t\t\t\tof choice.\n\t\t\t\t\n\t\t\tArgs:\n\t\t\t\tevent: Contains the information for the file system event \n\t\t\t\twhen modification has occurred\n\t\t\"\"\"\n if event.is_directory:\n return\n if self.paused:\n return\n if event.src_path.endswith('_specs.py') and not self.paused:\n self.paused = True\n directory = os.path.abspath(os.path.dirname(event.src_path))\n filename = os.path.basename(event.src_path)\n file = os.path.join(directory, filename)\n print(self.banner, end='\\n')\n print('testing specifications found in file: {0}'.format(file))\n print('')\n subprocess.call(['mamba', file], shell=True)\n print(self.banner, end='\\n')\n self.paused = False\n return\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass SpecificationsEventHandler(FileSystemEventHandler):\n \"\"\"Runs the tests inside the specifications class when any specification file is modified\n\t\"\"\"\n\n def __init__(self):\n self.paused = False\n self.banner = (\n '============================================================')\n\n def on_modified(self, event):\n super(SpecificationsEventHandler, self).on_modified(event)\n \"\"\"\n\t\t\tDescription:\n\t\t\t\tCatches the file modified event from the watchdog package and \n\t\t\t\tcreates the full path to the file for submission to the test engine \n\t\t\t\tof choice.\n\t\t\t\t\n\t\t\tArgs:\n\t\t\t\tevent: Contains the information for the file system event \n\t\t\t\twhen modification has occurred\n\t\t\"\"\"\n if event.is_directory:\n return\n if self.paused:\n return\n if event.src_path.endswith('_specs.py') and not self.paused:\n self.paused = True\n directory = os.path.abspath(os.path.dirname(event.src_path))\n filename = os.path.basename(event.src_path)\n file = os.path.join(directory, filename)\n print(self.banner, end='\\n')\n print('testing specifications found in file: {0}'.format(file))\n print('')\n subprocess.call(['mamba', file], shell=True)\n print(self.banner, end='\\n')\n self.paused = False\n return\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass SpecificationsEventHandler(FileSystemEventHandler):\n \"\"\"Runs the tests inside the specifications class when any specification file is modified\n\t\"\"\"\n\n def __init__(self):\n self.paused = False\n self.banner = (\n '============================================================')\n\n def on_modified(self, event):\n super(SpecificationsEventHandler, self).on_modified(event)\n \"\"\"\n\t\t\tDescription:\n\t\t\t\tCatches the file modified event from the watchdog package and \n\t\t\t\tcreates the full path to the file for submission to the test engine \n\t\t\t\tof choice.\n\t\t\t\t\n\t\t\tArgs:\n\t\t\t\tevent: Contains the information for the file system event \n\t\t\t\twhen modification has occurred\n\t\t\"\"\"\n if event.is_directory:\n return\n if self.paused:\n return\n if event.src_path.endswith('_specs.py') and not self.paused:\n self.paused = True\n directory = os.path.abspath(os.path.dirname(event.src_path))\n 
filename = os.path.basename(event.src_path)\n file = os.path.join(directory, filename)\n print(self.banner, end='\\n')\n print('testing specifications found in file: {0}'.format(file))\n print('')\n subprocess.call(['mamba', file], shell=True)\n print(self.banner, end='\\n')\n self.paused = False\n return\n\n\nif __name__ == '__main__':\n path = sys.argv[1]\n event_handler = SpecificationsEventHandler()\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n", "step-4": "<mask token>\nimport sys\nimport os.path\nimport subprocess\nimport time\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\n\nclass SpecificationsEventHandler(FileSystemEventHandler):\n \"\"\"Runs the tests inside the specifications class when any specification file is modified\n\t\"\"\"\n\n def __init__(self):\n self.paused = False\n self.banner = (\n '============================================================')\n\n def on_modified(self, event):\n super(SpecificationsEventHandler, self).on_modified(event)\n \"\"\"\n\t\t\tDescription:\n\t\t\t\tCatches the file modified event from the watchdog package and \n\t\t\t\tcreates the full path to the file for submission to the test engine \n\t\t\t\tof choice.\n\t\t\t\t\n\t\t\tArgs:\n\t\t\t\tevent: Contains the information for the file system event \n\t\t\t\twhen modification has occurred\n\t\t\"\"\"\n if event.is_directory:\n return\n if self.paused:\n return\n if event.src_path.endswith('_specs.py') and not self.paused:\n self.paused = True\n directory = os.path.abspath(os.path.dirname(event.src_path))\n filename = os.path.basename(event.src_path)\n file = os.path.join(directory, filename)\n print(self.banner, end='\\n')\n print('testing specifications found in file: {0}'.format(file))\n print('')\n subprocess.call(['mamba', file], shell=True)\n print(self.banner, end='\\n')\n self.paused = False\n return\n\n\nif __name__ == '__main__':\n path = sys.argv[1]\n event_handler = SpecificationsEventHandler()\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n", "step-5": "\"\"\"\nmodule\t\t\t: watcher.py\ndescription\t: Script to automatically watch a directory (via watchdog) for tests and run them via py.test\n\"\"\"\nimport sys\nimport os.path\nimport subprocess\nimport time\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\nclass SpecificationsEventHandler(FileSystemEventHandler):\n\t\"\"\"Runs the tests inside the specifications class when any specification file is modified\n\t\"\"\"\n\t\n\tdef __init__(self): \n\t\tself.paused = False\n\t\tself.banner = \"============================================================\"\n \n\tdef on_modified(self, event):\n\t\tsuper(SpecificationsEventHandler, self).on_modified(event)\n\t\t\"\"\"\n\t\t\tDescription:\n\t\t\t\tCatches the file modified event from the watchdog package and \n\t\t\t\tcreates the full path to the file for submission to the test engine \n\t\t\t\tof choice.\n\t\t\t\t\n\t\t\tArgs:\n\t\t\t\tevent: Contains the information for the file system event \n\t\t\t\twhen modification has occurred\n\t\t\"\"\"\n\t\t\n\t\t\n\t\t# file modified triggers directory modified as well...\t\t\n\t\tif event.is_directory:\n\t\t\treturn\n\n\t\tif self.paused: 
\n\t\t\treturn\n\n\t\tif event.src_path.endswith(\"_specs.py\") and not self.paused:\n\t\t\tself.paused = True\n\t\t\t#filename = os.path.basename(event.src_path)\n\t\t\tdirectory = os.path.abspath(os.path.dirname(event.src_path))\n\t\t\tfilename = os.path.basename(event.src_path)\n\t\t\tfile = os.path.join(directory, filename)\n\n\t\t\tprint(self.banner, end=\"\\n\")\n\t\t\tprint(\"testing specifications found in file: {0}\".format(file))\n\t\t\tprint(\"\")\n\t\t\t\n\t\t\t# if using pytest, uncomment the line below\n\t\t\t#subprocess.call(['py.test', '-v', file], shell=True)\t\n\t\t\t\n\t\t\t#using mamba as the test engine:\n\t\t\tsubprocess.call(['mamba', file], shell=True)\t\n\n\t\t\tprint(self.banner, end=\"\\n\")\n\n\t\t\tself.paused = False\n\t\t\treturn\n\n\nif __name__ == \"__main__\":\n path = sys.argv[1]\n event_handler = SpecificationsEventHandler()\n observer = Observer()\n observer.schedule(event_handler, path, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join() \n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
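The watcher above takes the directory to monitor as its only command-line argument (python watcher.py path/to/project) and re-runs any saved *_specs.py file with mamba. A minimal spec it would pick up could look like the following — assuming the mamba and expects packages are installed; the file name and example assertion are illustrative:

# math_specs.py -- save anywhere under the watched directory
from mamba import description, it
from expects import expect, equal

with description("addition"):
    with it("adds two integers"):
        expect(1 + 1).to(equal(2))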
import random

from typing import List

from faker import Faker

from call_center.src.actors.agent import InsuranceAgent
from call_center.src.actors.consumer import Consumer
from call_center.src.common.person import (
    AGE,
    AVAILABLE,
    INSURANCE_OPERATION,
    PHONE_NUMBER,
    INCOME,
    CARS_COUNT,
    KIDS_COUNT,
    STATE,
    RENT,
    BUY,
)
from call_center.src.common.singleton_meta import SingletonMeta

CONSUMER_COUNT = 1000
AGENTS_COUNT = 20
FAKE = Faker("en_US")


class ActorsCreator(metaclass=SingletonMeta):
    """
    Singleton class which acts as a container for both Agents and Consumers.
    In a real-world scenario, we would have a database containing both actors/consumers.
    This is a replacement, for the sake of example.
    """

    def __init__(self):
        self.consumers = ActorsCreator.create_consumers()
        self.agents = ActorsCreator.create_agents()

    def __del__(self):
        self.stop_all_agents()

    @staticmethod
    def create_consumers() -> List[Consumer]:
        """
        Create the consumers. Consumers are created with randomized attributes.
        :return: A new list of Consumer.
        """
        consumers = []
        for consumer in range(CONSUMER_COUNT):
            consumers.append(
                Consumer(
                    {
                        AGE: FAKE.random_int(min=0, max=120),
                        STATE: FAKE.state(),
                        KIDS_COUNT: FAKE.random_int(min=0, max=12),
                        CARS_COUNT: FAKE.random_int(min=0, max=10),
                        INSURANCE_OPERATION: random.choice((RENT, BUY)),
                        INCOME: FAKE.random_int(min=0, max=99999999999),
                        PHONE_NUMBER: FAKE.phone_number(),
                        AVAILABLE: True,
                    }
                )
            )
        return consumers

    @staticmethod
    def create_agents() -> List[InsuranceAgent]:
        """
        Create the InsuranceAgents. Agents are created with randomized attributes.
        :return: A new list of InsuranceAgent.
        """
        agents = []
        for consumer in range(AGENTS_COUNT):
            insurance_agent = InsuranceAgent(
                personal_info={
                    AGE: FAKE.random_int(min=0, max=120),
                    STATE: FAKE.state(),
                    KIDS_COUNT: FAKE.random_int(min=0, max=12),
                    CARS_COUNT: FAKE.random_int(min=0, max=10),
                    INSURANCE_OPERATION: random.choice((RENT, BUY)),
                    INCOME: FAKE.random_int(min=0, max=1000000),
                    PHONE_NUMBER: FAKE.phone_number(),
                    AVAILABLE: True,
                },
                call_acceptance_criteria=[
                    {
                        "person_attribute": AGE,
                        "comparison_operator": random.choice(("<", ">")),
                        "value": FAKE.random_int(min=0, max=120),
                    },
                    {
                        "person_attribute": INCOME,
                        "comparison_operator": random.choice(("<", ">")),
                        "value": FAKE.random_int(min=0, max=1000000),
                    },
                    {
                        "person_attribute": KIDS_COUNT,
                        "comparison_operator": random.choice(("<", ">")),
                        "value": FAKE.random_int(min=0, max=12),
                    },
                    {
                        "person_attribute": CARS_COUNT,
                        "comparison_operator": random.choice(("<", ">")),
                        "value": FAKE.random_int(min=0, max=12),
                    },
                    {
                        "person_attribute": INSURANCE_OPERATION,
                        "comparison_operator": random.choice(("<", ">")),
                        "value": random.choice((RENT, BUY)),
                    },
                ],
            )
            agents.append(insurance_agent)
        return agents

    def stop_all_agents(self):
        """
        Gracefully stop all agents threads on self deletion.
        To find more on agents' threads, see agent.py
        :return:
        """
        for agent in self.agents:
            if agent.available:
                agent.stop_activity()
normal
{ "blob_id": "db31a69c57f773a79e5eaa8b3443b0366fd74861", "index": 8565, "step-1": "<mask token>\n\n\nclass ActorsCreator(metaclass=SingletonMeta):\n <mask token>\n\n def __init__(self):\n self.consumers = ActorsCreator.create_consumers()\n self.agents = ActorsCreator.create_agents()\n\n def __del__(self):\n self.stop_all_agents()\n\n @staticmethod\n def create_consumers() ->List[Consumer]:\n \"\"\"\n Create the consumers. Consumers are created with randomized attributes.\n :return: A new list of Consumer.\n \"\"\"\n consumers = []\n for consumer in range(CONSUMER_COUNT):\n consumers.append(Consumer({AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(), KIDS_COUNT: FAKE.random_int(min=0, max\n =12), CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)), INCOME:\n FAKE.random_int(min=0, max=99999999999), PHONE_NUMBER: FAKE\n .phone_number(), AVAILABLE: True}))\n return consumers\n\n @staticmethod\n def create_agents() ->List[InsuranceAgent]:\n \"\"\"\n Create the InsuranceAgents. Consumers are created with randomized attributes.\n :return: A new list of InsuranceAgent.\n \"\"\"\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(personal_info={AGE: FAKE.\n random_int(min=0, max=120), STATE: FAKE.state(), KIDS_COUNT:\n FAKE.random_int(min=0, max=12), CARS_COUNT: FAKE.random_int\n (min=0, max=10), INSURANCE_OPERATION: random.choice((RENT,\n BUY)), INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(), AVAILABLE: True},\n call_acceptance_criteria=[{'person_attribute': AGE,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=120)}, {'person_attribute':\n INCOME, 'comparison_operator': random.choice(('<', '>')),\n 'value': FAKE.random_int(min=0, max=1000000)}, {\n 'person_attribute': KIDS_COUNT, 'comparison_operator':\n random.choice(('<', '>')), 'value': FAKE.random_int(min=0,\n max=12)}, {'person_attribute': CARS_COUNT,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=12)}, {'person_attribute':\n INSURANCE_OPERATION, 'comparison_operator': random.choice((\n '<', '>')), 'value': random.choice((RENT, BUY))}])\n agents.append(insurance_agent)\n return agents\n\n def stop_all_agents(self):\n \"\"\"\n Gracefully stop all agents threads on self deletion.\n To find more on agents' threads, see agent.py\n :return:\n \"\"\"\n for agent in self.agents:\n if agent.available:\n agent.stop_activity()\n", "step-2": "<mask token>\n\n\nclass ActorsCreator(metaclass=SingletonMeta):\n \"\"\"\n Singleton class which acts as a container for both Agents and Consumers.\n In a real-world scenario, we would have a database containing both actors/consumers.\n This is a replacement, for the sake of example.\n \"\"\"\n\n def __init__(self):\n self.consumers = ActorsCreator.create_consumers()\n self.agents = ActorsCreator.create_agents()\n\n def __del__(self):\n self.stop_all_agents()\n\n @staticmethod\n def create_consumers() ->List[Consumer]:\n \"\"\"\n Create the consumers. 
Consumers are created with randomized attributes.\n :return: A new list of Consumer.\n \"\"\"\n consumers = []\n for consumer in range(CONSUMER_COUNT):\n consumers.append(Consumer({AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(), KIDS_COUNT: FAKE.random_int(min=0, max\n =12), CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)), INCOME:\n FAKE.random_int(min=0, max=99999999999), PHONE_NUMBER: FAKE\n .phone_number(), AVAILABLE: True}))\n return consumers\n\n @staticmethod\n def create_agents() ->List[InsuranceAgent]:\n \"\"\"\n Create the InsuranceAgents. Consumers are created with randomized attributes.\n :return: A new list of InsuranceAgent.\n \"\"\"\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(personal_info={AGE: FAKE.\n random_int(min=0, max=120), STATE: FAKE.state(), KIDS_COUNT:\n FAKE.random_int(min=0, max=12), CARS_COUNT: FAKE.random_int\n (min=0, max=10), INSURANCE_OPERATION: random.choice((RENT,\n BUY)), INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(), AVAILABLE: True},\n call_acceptance_criteria=[{'person_attribute': AGE,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=120)}, {'person_attribute':\n INCOME, 'comparison_operator': random.choice(('<', '>')),\n 'value': FAKE.random_int(min=0, max=1000000)}, {\n 'person_attribute': KIDS_COUNT, 'comparison_operator':\n random.choice(('<', '>')), 'value': FAKE.random_int(min=0,\n max=12)}, {'person_attribute': CARS_COUNT,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=12)}, {'person_attribute':\n INSURANCE_OPERATION, 'comparison_operator': random.choice((\n '<', '>')), 'value': random.choice((RENT, BUY))}])\n agents.append(insurance_agent)\n return agents\n\n def stop_all_agents(self):\n \"\"\"\n Gracefully stop all agents threads on self deletion.\n To find more on agents' threads, see agent.py\n :return:\n \"\"\"\n for agent in self.agents:\n if agent.available:\n agent.stop_activity()\n", "step-3": "<mask token>\nCONSUMER_COUNT = 1000\nAGENTS_COUNT = 20\nFAKE = Faker('en_US')\n\n\nclass ActorsCreator(metaclass=SingletonMeta):\n \"\"\"\n Singleton class which acts as a container for both Agents and Consumers.\n In a real-world scenario, we would have a database containing both actors/consumers.\n This is a replacement, for the sake of example.\n \"\"\"\n\n def __init__(self):\n self.consumers = ActorsCreator.create_consumers()\n self.agents = ActorsCreator.create_agents()\n\n def __del__(self):\n self.stop_all_agents()\n\n @staticmethod\n def create_consumers() ->List[Consumer]:\n \"\"\"\n Create the consumers. Consumers are created with randomized attributes.\n :return: A new list of Consumer.\n \"\"\"\n consumers = []\n for consumer in range(CONSUMER_COUNT):\n consumers.append(Consumer({AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(), KIDS_COUNT: FAKE.random_int(min=0, max\n =12), CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)), INCOME:\n FAKE.random_int(min=0, max=99999999999), PHONE_NUMBER: FAKE\n .phone_number(), AVAILABLE: True}))\n return consumers\n\n @staticmethod\n def create_agents() ->List[InsuranceAgent]:\n \"\"\"\n Create the InsuranceAgents. 
Consumers are created with randomized attributes.\n :return: A new list of InsuranceAgent.\n \"\"\"\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(personal_info={AGE: FAKE.\n random_int(min=0, max=120), STATE: FAKE.state(), KIDS_COUNT:\n FAKE.random_int(min=0, max=12), CARS_COUNT: FAKE.random_int\n (min=0, max=10), INSURANCE_OPERATION: random.choice((RENT,\n BUY)), INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(), AVAILABLE: True},\n call_acceptance_criteria=[{'person_attribute': AGE,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=120)}, {'person_attribute':\n INCOME, 'comparison_operator': random.choice(('<', '>')),\n 'value': FAKE.random_int(min=0, max=1000000)}, {\n 'person_attribute': KIDS_COUNT, 'comparison_operator':\n random.choice(('<', '>')), 'value': FAKE.random_int(min=0,\n max=12)}, {'person_attribute': CARS_COUNT,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=12)}, {'person_attribute':\n INSURANCE_OPERATION, 'comparison_operator': random.choice((\n '<', '>')), 'value': random.choice((RENT, BUY))}])\n agents.append(insurance_agent)\n return agents\n\n def stop_all_agents(self):\n \"\"\"\n Gracefully stop all agents threads on self deletion.\n To find more on agents' threads, see agent.py\n :return:\n \"\"\"\n for agent in self.agents:\n if agent.available:\n agent.stop_activity()\n", "step-4": "import random\nfrom typing import List\nfrom faker import Faker\nfrom call_center.src.actors.agent import InsuranceAgent\nfrom call_center.src.actors.consumer import Consumer\nfrom call_center.src.common.person import AGE, AVAILABLE, INSURANCE_OPERATION, PHONE_NUMBER, INCOME, CARS_COUNT, KIDS_COUNT, STATE, RENT, BUY\nfrom call_center.src.common.singleton_meta import SingletonMeta\nCONSUMER_COUNT = 1000\nAGENTS_COUNT = 20\nFAKE = Faker('en_US')\n\n\nclass ActorsCreator(metaclass=SingletonMeta):\n \"\"\"\n Singleton class which acts as a container for both Agents and Consumers.\n In a real-world scenario, we would have a database containing both actors/consumers.\n This is a replacement, for the sake of example.\n \"\"\"\n\n def __init__(self):\n self.consumers = ActorsCreator.create_consumers()\n self.agents = ActorsCreator.create_agents()\n\n def __del__(self):\n self.stop_all_agents()\n\n @staticmethod\n def create_consumers() ->List[Consumer]:\n \"\"\"\n Create the consumers. Consumers are created with randomized attributes.\n :return: A new list of Consumer.\n \"\"\"\n consumers = []\n for consumer in range(CONSUMER_COUNT):\n consumers.append(Consumer({AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(), KIDS_COUNT: FAKE.random_int(min=0, max\n =12), CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)), INCOME:\n FAKE.random_int(min=0, max=99999999999), PHONE_NUMBER: FAKE\n .phone_number(), AVAILABLE: True}))\n return consumers\n\n @staticmethod\n def create_agents() ->List[InsuranceAgent]:\n \"\"\"\n Create the InsuranceAgents. 
Consumers are created with randomized attributes.\n :return: A new list of InsuranceAgent.\n \"\"\"\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(personal_info={AGE: FAKE.\n random_int(min=0, max=120), STATE: FAKE.state(), KIDS_COUNT:\n FAKE.random_int(min=0, max=12), CARS_COUNT: FAKE.random_int\n (min=0, max=10), INSURANCE_OPERATION: random.choice((RENT,\n BUY)), INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(), AVAILABLE: True},\n call_acceptance_criteria=[{'person_attribute': AGE,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=120)}, {'person_attribute':\n INCOME, 'comparison_operator': random.choice(('<', '>')),\n 'value': FAKE.random_int(min=0, max=1000000)}, {\n 'person_attribute': KIDS_COUNT, 'comparison_operator':\n random.choice(('<', '>')), 'value': FAKE.random_int(min=0,\n max=12)}, {'person_attribute': CARS_COUNT,\n 'comparison_operator': random.choice(('<', '>')), 'value':\n FAKE.random_int(min=0, max=12)}, {'person_attribute':\n INSURANCE_OPERATION, 'comparison_operator': random.choice((\n '<', '>')), 'value': random.choice((RENT, BUY))}])\n agents.append(insurance_agent)\n return agents\n\n def stop_all_agents(self):\n \"\"\"\n Gracefully stop all agents threads on self deletion.\n To find more on agents' threads, see agent.py\n :return:\n \"\"\"\n for agent in self.agents:\n if agent.available:\n agent.stop_activity()\n", "step-5": "import random\n\nfrom typing import List\n\nfrom faker import Faker\n\nfrom call_center.src.actors.agent import InsuranceAgent\nfrom call_center.src.actors.consumer import Consumer\nfrom call_center.src.common.person import (\n AGE,\n AVAILABLE,\n INSURANCE_OPERATION,\n PHONE_NUMBER,\n INCOME,\n CARS_COUNT,\n KIDS_COUNT,\n STATE,\n RENT,\n BUY,\n)\nfrom call_center.src.common.singleton_meta import SingletonMeta\n\nCONSUMER_COUNT = 1000\nAGENTS_COUNT = 20\nFAKE = Faker(\"en_US\")\n\n\nclass ActorsCreator(metaclass=SingletonMeta):\n \"\"\"\n Singleton class which acts as a container for both Agents and Consumers.\n In a real-world scenario, we would have a database containing both actors/consumers.\n This is a replacement, for the sake of example.\n \"\"\"\n\n def __init__(self):\n self.consumers = ActorsCreator.create_consumers()\n self.agents = ActorsCreator.create_agents()\n\n def __del__(self):\n self.stop_all_agents()\n\n @staticmethod\n def create_consumers() -> List[Consumer]:\n \"\"\"\n Create the consumers. Consumers are created with randomized attributes.\n :return: A new list of Consumer.\n \"\"\"\n consumers = []\n for consumer in range(CONSUMER_COUNT):\n consumers.append(\n Consumer(\n {\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=99999999999),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n }\n )\n )\n return consumers\n\n @staticmethod\n def create_agents() -> List[InsuranceAgent]:\n \"\"\"\n Create the InsuranceAgents. 
Consumers are created with randomized attributes.\n :return: A new list of InsuranceAgent.\n \"\"\"\n agents = []\n for consumer in range(AGENTS_COUNT):\n insurance_agent = InsuranceAgent(\n personal_info={\n AGE: FAKE.random_int(min=0, max=120),\n STATE: FAKE.state(),\n KIDS_COUNT: FAKE.random_int(min=0, max=12),\n CARS_COUNT: FAKE.random_int(min=0, max=10),\n INSURANCE_OPERATION: random.choice((RENT, BUY)),\n INCOME: FAKE.random_int(min=0, max=1000000),\n PHONE_NUMBER: FAKE.phone_number(),\n AVAILABLE: True,\n },\n call_acceptance_criteria=[\n {\n \"person_attribute\": AGE,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=120,\n ),\n },\n {\n \"person_attribute\": INCOME,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=1000000,\n ),\n },\n {\n \"person_attribute\": KIDS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": CARS_COUNT,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": FAKE.random_int(\n min=0,\n max=12,\n ),\n },\n {\n \"person_attribute\": INSURANCE_OPERATION,\n \"comparison_operator\": random.choice((\"<\", \">\")),\n \"value\": random.choice((RENT, BUY)),\n },\n ],\n )\n agents.append(insurance_agent)\n return agents\n\n def stop_all_agents(self):\n \"\"\"\n Gracefully stop all agents threads on self deletion.\n To find more on agents' threads, see agent.py\n :return:\n \"\"\"\n for agent in self.agents:\n if agent.available:\n agent.stop_activity()\n", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
from django.shortcuts import render, HttpResponseRedirect, HttpResponse from django.views.generic import View from django.contrib.auth import login from django.contrib.auth.models import User class RegisterView(View): def get(self, request): return render(request, 'users/register.html', locals()) def post(self, request): try: user = User(first_name=request.POST.get('first_name'), last_name=request.POST.get( 'last_name'), email=request.POST.get('email'), username=request.POST.get('email')) user.set_password(request.POST.get('password')) user.save() except Exception as e: print(e) return render(request, 'users/register.html', locals()) return HttpResponseRedirect('/users/login') class HomeView(View): def get(self, request): return HttpResponse(f"Home Page | Logged in as - {request.user}")
normal
{ "blob_id": "c9191df0fc04818b4df9c93a9479f75a60688aa9", "index": 6372, "step-1": "<mask token>\n\n\nclass RegisterView(View):\n <mask token>\n <mask token>\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n", "step-2": "<mask token>\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n <mask token>\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n", "step-3": "<mask token>\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'),\n last_name=request.POST.get('last_name'), email=request.POST\n .get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n", "step-4": "from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\n\n\nclass RegisterView(View):\n\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'),\n last_name=request.POST.get('last_name'), email=request.POST\n .get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n\n def get(self, request):\n return HttpResponse(f'Home Page | Logged in as - {request.user}')\n", "step-5": "from django.shortcuts import render, HttpResponseRedirect, HttpResponse\nfrom django.views.generic import View\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\n\n\nclass RegisterView(View):\n def get(self, request):\n return render(request, 'users/register.html', locals())\n\n def post(self, request):\n try:\n user = User(first_name=request.POST.get('first_name'), last_name=request.POST.get(\n 'last_name'), email=request.POST.get('email'), username=request.POST.get('email'))\n user.set_password(request.POST.get('password'))\n user.save()\n except Exception as e:\n print(e)\n return render(request, 'users/register.html', locals())\n\n return HttpResponseRedirect('/users/login')\n\n\nclass HomeView(View):\n def get(self, request):\n return HttpResponse(f\"Home Page | Logged in as - {request.user}\")\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
from django.urls import path from photo.api.views import api_photo_detail_view, api_photos_view urlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'), path('', api_photos_view, name='users')]
normal
{ "blob_id": "ab4145ccc0b360dcca9b9aa6ebe919bdddac65a2", "index": 3962, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),\n path('', api_photos_view, name='users')]\n", "step-3": "from django.urls import path\nfrom photo.api.views import api_photo_detail_view, api_photos_view\nurlpatterns = [path('<int:id>', api_photo_detail_view, name='user_detail'),\n path('', api_photos_view, name='users')]\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from collections import defaultdict, Counter import numpy as np import sys import re def parseFile(file, frequency_tree): readnumber = re.compile('[r]+\d+') line_spliter = re.compile('\t+') colon_spliter = re.compile(':') forward_reads = 0 reverse_reads = 0 unmatched_reads = 0 read_positions = defaultdict(list) position_differences = [] position_differences_stdv_list = [] total_position_diffs = [] read_lengths_count = 0 read_lengths_total = 0 read_frequency = 0 read_lengths_average = 0 num_chromosomes = 0 num_a = 0 num_c = 0 num_g = 0 num_t = 0 print("############# OPENING SAM FILE", file=sys.stderr) with open(file, 'rt') as fp: line = fp.readline() while line: subline = line_spliter.split(line) line = fp.readline() if (int(subline[1]) & 4 == 4): unmatched_reads += 1 elif (int(subline[1]) & 16 == 16): reverse_reads += 1 else: forward_reads += 1 read = subline[9] read_lengths_count += 1 read_lengths_total += len(read) bases_count = Counter(read) num_a += bases_count["A"] num_c += bases_count["C"] num_g += bases_count["G"] num_t += bases_count["T"] chromosome = getChromosome(subline[2]) if chromosome != -1: read_positions[chromosome].append(int(subline[3])) if read_lengths_count != 0: read_lengths_average = read_lengths_total / read_lengths_count if (forward_reads + reverse_reads + unmatched_reads) != 0: read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads) gene_annotation_match = 0 gene_annotation_total = 0 gene_annotation_percent = 0 for key in read_positions.keys(): for position in read_positions[key]: #TODO there is for sure a better way to do this than with a break for _ in frequency_tree[key].find_overlap(position, position): gene_annotation_match += 1 break gene_annotation_total += 1 if gene_annotation_total != 0: gene_annotation_percent = gene_annotation_match / gene_annotation_total print("gene_annotation_percent = " + str(gene_annotation_percent)) for _, position_list in read_positions.items(): position_list.sort() num_chromosomes += 1 for i in range(len(position_list) - 1): position_differences.append(position_list[i + 1] - position_list[i]) try: std_of_pos_diff = np.std(position_differences) mean_of_pos_diffs = np.nanmean(position_differences) max_position_difference = np.amax(position_differences) min_position_difference = np.amin(position_differences) except: return None return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference, num_a/ read_lengths_total, num_c/ read_lengths_total, num_g / read_lengths_total, num_t / read_lengths_total] def parseString(txt, frequency_tree): spliter = re.compile('\n+') readnumber = re.compile('[r]+\d+') line_spliter = re.compile('\t+') colon_spliter = re.compile(':') forward_reads = 0 reverse_reads = 0 unmatched_reads = 0 read_positions = defaultdict(list) position_differences = [] position_differences_stdv_list = [] total_position_diffs = [] read_lengths_count = 0 read_lengths_total = 0 read_frequency = 0 read_lengths_average = 0 num_chromosomes = 0 lines = spliter.split(txt) #Itterating though everyline for i in range(len(lines) - 1): subline = line_spliter.split(lines[i]) if (int(subline[1]) & 4 == 4): unmatched_reads += 1 elif (int(subline[1]) & 16 == 16): reverse_reads += 1 else: forward_reads += 1 read = subline[9] read_lengths_count += 1 read_lengths_total += len(read) chromosome = getChromosome(subline[2]) if chromosome != -1: read_positions[chromosome].append(int(subline[3])) 
if read_lengths_count != 0: read_lengths_average = read_lengths_total / read_lengths_count if (forward_reads + reverse_reads + unmatched_reads) != 0: read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads) gene_annotation_match = 0 gene_annotation_total = 0 gene_annotation_percent = 0 for key in read_positions.keys(): for position in read_positions[key]: #TODO there is for sure a better way to do this than with a break for _ in frequency_tree[key].find_overlap(position, position): gene_annotation_match += 1 break gene_annotation_total += 1 if gene_annotation_total != 0: gene_annotation_percent = gene_annotation_match / gene_annotation_total print("gene_annotation_percent = " + str(gene_annotation_percent)) for _, position_list in read_positions.items(): position_list.sort() num_chromosomes += 1 for i in range(len(position_list) - 1): position_differences.append(position_list[i + 1] - position_list[i]) try: std_of_pos_diff = np.std(position_differences) mean_of_pos_diffs = np.nanmean(position_differences) max_position_difference = np.amax(position_differences) min_position_difference = np.amin(position_differences) except: return None return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference] def getChromosome(str): if str == "*" or str[3:] == 'X': return -1 try: return int(str[3:]) except: return -1
normal
{ "blob_id": "227b71cb6d4cde8f498ad19c1c5f95f7fc572752", "index": 6995, "step-1": "<mask token>\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n", "step-2": "<mask token>\n\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n print('############# OPENING SAM FILE', file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = fp.readline()\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count['A']\n num_c += bases_count['C']\n num_g += bases_count['G']\n num_t += bases_count['T']\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (\n forward_reads + reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = (gene_annotation_match /\n gene_annotation_total)\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] -\n position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average,\n read_frequency, std_of_pos_diff, mean_of_pos_diffs,\n num_chromosomes, max_position_difference,\n min_position_difference, num_a / read_lengths_total, num_c /\n read_lengths_total, num_g / read_lengths_total, num_t /\n read_lengths_total]\n\n\n<mask token>\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n", "step-3": "<mask token>\n\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes 
= 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n print('############# OPENING SAM FILE', file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = fp.readline()\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count['A']\n num_c += bases_count['C']\n num_g += bases_count['G']\n num_t += bases_count['T']\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (\n forward_reads + reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = (gene_annotation_match /\n gene_annotation_total)\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] -\n position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average,\n read_frequency, std_of_pos_diff, mean_of_pos_diffs,\n num_chromosomes, max_position_difference,\n min_position_difference, num_a / read_lengths_total, num_c /\n read_lengths_total, num_g / read_lengths_total, num_t /\n read_lengths_total]\n\n\ndef parseString(txt, frequency_tree):\n spliter = re.compile('\\n+')\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n lines = spliter.split(txt)\n for i in range(len(lines) - 1):\n subline = line_spliter.split(lines[i])\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads +\n reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in 
read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i]\n )\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency,\n std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,\n max_position_difference, min_position_difference]\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n", "step-4": "from collections import defaultdict, Counter\nimport numpy as np\nimport sys\nimport re\n\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n print('############# OPENING SAM FILE', file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = fp.readline()\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count['A']\n num_c += bases_count['C']\n num_g += bases_count['G']\n num_t += bases_count['T']\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (\n forward_reads + reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = (gene_annotation_match /\n gene_annotation_total)\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] -\n position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n 
except:\n return None\n return [gene_annotation_percent, read_lengths_average,\n read_frequency, std_of_pos_diff, mean_of_pos_diffs,\n num_chromosomes, max_position_difference,\n min_position_difference, num_a / read_lengths_total, num_c /\n read_lengths_total, num_g / read_lengths_total, num_t /\n read_lengths_total]\n\n\ndef parseString(txt, frequency_tree):\n spliter = re.compile('\\n+')\n readnumber = re.compile('[r]+\\\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n lines = spliter.split(txt)\n for i in range(len(lines) - 1):\n subline = line_spliter.split(lines[i])\n if int(subline[1]) & 4 == 4:\n unmatched_reads += 1\n elif int(subline[1]) & 16 == 16:\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if forward_reads + reverse_reads + unmatched_reads != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads +\n reverse_reads + unmatched_reads)\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print('gene_annotation_percent = ' + str(gene_annotation_percent))\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i]\n )\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency,\n std_of_pos_diff, mean_of_pos_diffs, num_chromosomes,\n max_position_difference, min_position_difference]\n\n\ndef getChromosome(str):\n if str == '*' or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n", "step-5": "from collections import defaultdict, Counter\nimport numpy as np\nimport sys\nimport re\n\ndef parseFile(file, frequency_tree):\n readnumber = re.compile('[r]+\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n num_a = 0\n num_c = 0\n num_g = 0\n num_t = 0\n\n\n print(\"############# OPENING SAM FILE\", file=sys.stderr)\n with open(file, 'rt') as fp:\n line = fp.readline()\n while line:\n subline = line_spliter.split(line)\n line = 
fp.readline()\n if (int(subline[1]) & 4 == 4):\n unmatched_reads += 1\n elif (int(subline[1]) & 16 == 16):\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n bases_count = Counter(read)\n num_a += bases_count[\"A\"]\n num_c += bases_count[\"C\"]\n num_g += bases_count[\"G\"]\n num_t += bases_count[\"T\"]\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if (forward_reads + reverse_reads + unmatched_reads) != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads)\n\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n #TODO there is for sure a better way to do this than with a break\n for _ in frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print(\"gene_annotation_percent = \" + str(gene_annotation_percent))\n\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference, num_a/ read_lengths_total, num_c/ read_lengths_total, num_g / read_lengths_total, num_t / read_lengths_total]\n\n\n\ndef parseString(txt, frequency_tree):\n spliter = re.compile('\\n+')\n readnumber = re.compile('[r]+\\d+')\n line_spliter = re.compile('\\t+')\n colon_spliter = re.compile(':')\n forward_reads = 0\n reverse_reads = 0\n unmatched_reads = 0\n read_positions = defaultdict(list)\n position_differences = []\n position_differences_stdv_list = []\n total_position_diffs = []\n read_lengths_count = 0\n read_lengths_total = 0\n read_frequency = 0\n read_lengths_average = 0\n num_chromosomes = 0\n\n lines = spliter.split(txt)\n #Itterating though everyline\n for i in range(len(lines) - 1):\n subline = line_spliter.split(lines[i])\n if (int(subline[1]) & 4 == 4):\n unmatched_reads += 1\n elif (int(subline[1]) & 16 == 16):\n reverse_reads += 1\n else:\n forward_reads += 1\n read = subline[9]\n read_lengths_count += 1\n read_lengths_total += len(read)\n chromosome = getChromosome(subline[2])\n if chromosome != -1:\n read_positions[chromosome].append(int(subline[3]))\n if read_lengths_count != 0:\n read_lengths_average = read_lengths_total / read_lengths_count\n if (forward_reads + reverse_reads + unmatched_reads) != 0:\n read_frequency = (forward_reads + reverse_reads) / (forward_reads + reverse_reads + unmatched_reads)\n\n gene_annotation_match = 0\n gene_annotation_total = 0\n gene_annotation_percent = 0\n for key in read_positions.keys():\n for position in read_positions[key]:\n #TODO there is for sure a better way to do this than with a break\n for _ in 
frequency_tree[key].find_overlap(position, position):\n gene_annotation_match += 1\n break\n gene_annotation_total += 1\n if gene_annotation_total != 0:\n gene_annotation_percent = gene_annotation_match / gene_annotation_total\n print(\"gene_annotation_percent = \" + str(gene_annotation_percent))\n\n for _, position_list in read_positions.items():\n position_list.sort()\n num_chromosomes += 1\n for i in range(len(position_list) - 1):\n position_differences.append(position_list[i + 1] - position_list[i])\n try:\n std_of_pos_diff = np.std(position_differences)\n mean_of_pos_diffs = np.nanmean(position_differences)\n max_position_difference = np.amax(position_differences)\n min_position_difference = np.amin(position_differences)\n except:\n return None\n return [gene_annotation_percent, read_lengths_average, read_frequency, std_of_pos_diff, mean_of_pos_diffs, num_chromosomes, max_position_difference, min_position_difference]\n\ndef getChromosome(str):\n if str == \"*\" or str[3:] == 'X':\n return -1\n try:\n return int(str[3:])\n except:\n return -1\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#! /usr/bin/env python import smtpsend S = smtpsend.Smtpsent(SUBJECT='Test') S.sendemail(''' this is a test! ''')
normal
{ "blob_id": "7754974e79202b2df4ab9a7f69948483042a67cc", "index": 855, "step-1": "<mask token>\n", "step-2": "<mask token>\nS.sendemail(\"\"\"\nthis is a test!\n\"\"\")\n", "step-3": "<mask token>\nS = smtpsend.Smtpsent(SUBJECT='Test')\nS.sendemail(\"\"\"\nthis is a test!\n\"\"\")\n", "step-4": "import smtpsend\nS = smtpsend.Smtpsent(SUBJECT='Test')\nS.sendemail(\"\"\"\nthis is a test!\n\"\"\")\n", "step-5": "#! /usr/bin/env python\n\nimport smtpsend\n\nS = smtpsend.Smtpsent(SUBJECT='Test')\nS.sendemail('''\nthis is a test!\n''')\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import json import glob import argparse from model.NewModel import runModel from collections import namedtuple import csv OutputFile = "./HealthSimOutputSheet.csv" parser = argparse.ArgumentParser(description='Select policy file') parser.add_argument('-p', type=str, default='default', help='name of a a policy file') parser.add_argument('-n', type=int, default=100000, help='number of patients') args = parser.parse_args() NumPatients = args.n policyName = args.p matchingPolicies = glob.glob(f"./policies/{policyName}*") if len(matchingPolicies) == 0: raise SystemExit(f"No matching policy named {policyName}") elif len(matchingPolicies) > 1: raise SystemExit(f"Multiple matching policies for {policyName}: {matchingPolicies}") policyFile = matchingPolicies[0] with open(policyFile, 'r') as stream: # magic to turn json into an object instead of a dict # https://stackoverflow.com/a/15882054 policySettings = json.load(stream, object_hook=lambda d: namedtuple('X', d.keys())(*d.values())) results = runModel(policySettings, NumPatients) with open(OutputFile, 'w', newline='') as csvfile: writer = csv.writer(csvfile) keys = ["Number on Private Insurance:", "Number on Medicare:", "Number on Medicaid:", "Number of Uninsured:", "Private Premium:", "Medicare Premium:", "Medicare Funds:", "Medicaid Funds:"] for key in keys: row = [key] + results['runSummary'][key] writer.writerow(row) patients = results['patients'] writer.writerow(["Patient ID", "Age", "Ethnicity", "Gender", "Education", "Income", "Income Bracket", "QALY", "Diabetes", "Diagnosed", "Controlled", "Deceased"]) for m in range(len(patients)): writer.writerow([m, patients[m].age, patients[m].ethnicity, patients[m].gender, patients[m].education, patients[m].income, patients[m].IPR, patients[m].QALY, patients[m].diabetes, patients[m].diagnosed, patients[m].controlled, patients[m].deceased])
normal
{ "blob_id": "894ce07c6443208483be2d3ef1409f12f24d99f3", "index": 2852, "step-1": "<mask token>\n", "step-2": "<mask token>\nparser.add_argument('-p', type=str, default='default', help=\n 'name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\n<mask token>\nif len(matchingPolicies) == 0:\n raise SystemExit(f'No matching policy named {policyName}')\nelif len(matchingPolicies) > 1:\n raise SystemExit(\n f'Multiple matching policies for {policyName}: {matchingPolicies}')\n<mask token>\nwith open(policyFile, 'r') as stream:\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X',\n d.keys())(*d.values()))\n<mask token>\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = ['Number on Private Insurance:', 'Number on Medicare:',\n 'Number on Medicaid:', 'Number of Uninsured:', 'Private Premium:',\n 'Medicare Premium:', 'Medicare Funds:', 'Medicaid Funds:']\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n patients = results['patients']\n writer.writerow(['Patient ID', 'Age', 'Ethnicity', 'Gender',\n 'Education', 'Income', 'Income Bracket', 'QALY', 'Diabetes',\n 'Diagnosed', 'Controlled', 'Deceased'])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity,\n patients[m].gender, patients[m].education, patients[m].income,\n patients[m].IPR, patients[m].QALY, patients[m].diabetes,\n patients[m].diagnosed, patients[m].controlled, patients[m].\n deceased])\n", "step-3": "<mask token>\nOutputFile = './HealthSimOutputSheet.csv'\nparser = argparse.ArgumentParser(description='Select policy file')\nparser.add_argument('-p', type=str, default='default', help=\n 'name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\nargs = parser.parse_args()\nNumPatients = args.n\npolicyName = args.p\nmatchingPolicies = glob.glob(f'./policies/{policyName}*')\nif len(matchingPolicies) == 0:\n raise SystemExit(f'No matching policy named {policyName}')\nelif len(matchingPolicies) > 1:\n raise SystemExit(\n f'Multiple matching policies for {policyName}: {matchingPolicies}')\npolicyFile = matchingPolicies[0]\nwith open(policyFile, 'r') as stream:\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X',\n d.keys())(*d.values()))\nresults = runModel(policySettings, NumPatients)\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = ['Number on Private Insurance:', 'Number on Medicare:',\n 'Number on Medicaid:', 'Number of Uninsured:', 'Private Premium:',\n 'Medicare Premium:', 'Medicare Funds:', 'Medicaid Funds:']\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n patients = results['patients']\n writer.writerow(['Patient ID', 'Age', 'Ethnicity', 'Gender',\n 'Education', 'Income', 'Income Bracket', 'QALY', 'Diabetes',\n 'Diagnosed', 'Controlled', 'Deceased'])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity,\n patients[m].gender, patients[m].education, patients[m].income,\n patients[m].IPR, patients[m].QALY, patients[m].diabetes,\n patients[m].diagnosed, patients[m].controlled, patients[m].\n deceased])\n", "step-4": "import json\nimport glob\nimport argparse\nfrom model.NewModel import runModel\nfrom collections import namedtuple\nimport csv\nOutputFile = './HealthSimOutputSheet.csv'\nparser = argparse.ArgumentParser(description='Select policy 
file')\nparser.add_argument('-p', type=str, default='default', help=\n 'name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\nargs = parser.parse_args()\nNumPatients = args.n\npolicyName = args.p\nmatchingPolicies = glob.glob(f'./policies/{policyName}*')\nif len(matchingPolicies) == 0:\n raise SystemExit(f'No matching policy named {policyName}')\nelif len(matchingPolicies) > 1:\n raise SystemExit(\n f'Multiple matching policies for {policyName}: {matchingPolicies}')\npolicyFile = matchingPolicies[0]\nwith open(policyFile, 'r') as stream:\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X',\n d.keys())(*d.values()))\nresults = runModel(policySettings, NumPatients)\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = ['Number on Private Insurance:', 'Number on Medicare:',\n 'Number on Medicaid:', 'Number of Uninsured:', 'Private Premium:',\n 'Medicare Premium:', 'Medicare Funds:', 'Medicaid Funds:']\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n patients = results['patients']\n writer.writerow(['Patient ID', 'Age', 'Ethnicity', 'Gender',\n 'Education', 'Income', 'Income Bracket', 'QALY', 'Diabetes',\n 'Diagnosed', 'Controlled', 'Deceased'])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity,\n patients[m].gender, patients[m].education, patients[m].income,\n patients[m].IPR, patients[m].QALY, patients[m].diabetes,\n patients[m].diagnosed, patients[m].controlled, patients[m].\n deceased])\n", "step-5": "import json\nimport glob\nimport argparse\nfrom model.NewModel import runModel\nfrom collections import namedtuple\nimport csv\n\nOutputFile = \"./HealthSimOutputSheet.csv\"\n\nparser = argparse.ArgumentParser(description='Select policy file')\nparser.add_argument('-p', type=str, default='default', help='name of a a policy file')\nparser.add_argument('-n', type=int, default=100000, help='number of patients')\n\nargs = parser.parse_args()\n\nNumPatients = args.n\n\npolicyName = args.p\nmatchingPolicies = glob.glob(f\"./policies/{policyName}*\")\n\nif len(matchingPolicies) == 0:\n raise SystemExit(f\"No matching policy named {policyName}\")\nelif len(matchingPolicies) > 1:\n raise SystemExit(f\"Multiple matching policies for {policyName}: {matchingPolicies}\")\n\npolicyFile = matchingPolicies[0]\n\nwith open(policyFile, 'r') as stream:\n # magic to turn json into an object instead of a dict\n # https://stackoverflow.com/a/15882054\n policySettings = json.load(stream, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n\n\nresults = runModel(policySettings, NumPatients)\n\nwith open(OutputFile, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n keys = [\"Number on Private Insurance:\", \"Number on Medicare:\",\n \"Number on Medicaid:\", \"Number of Uninsured:\",\n \"Private Premium:\", \"Medicare Premium:\",\n \"Medicare Funds:\", \"Medicaid Funds:\"]\n\n for key in keys:\n row = [key] + results['runSummary'][key]\n writer.writerow(row)\n\n patients = results['patients']\n writer.writerow([\"Patient ID\", \"Age\", \"Ethnicity\", \"Gender\", \"Education\", \"Income\", \"Income Bracket\", \"QALY\", \"Diabetes\", \"Diagnosed\", \"Controlled\", \"Deceased\"])\n for m in range(len(patients)):\n writer.writerow([m, patients[m].age, patients[m].ethnicity, patients[m].gender, patients[m].education, patients[m].income, patients[m].IPR, patients[m].QALY, patients[m].diabetes, 
patients[m].diagnosed, patients[m].controlled, patients[m].deceased])\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Generated by Django 2.1.2 on 2018-10-25 09:36 import django.contrib.auth.models import django.contrib.auth.validators from django.db import migrations, models import django.utils.timezone import uuid class Migration(migrations.Migration): dependencies = [ ('grafit', '0002_article'), ] operations = [ migrations.RunSQL(""" INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License. 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6] On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size. Indexing Fields in a MongoDB document can be indexed with primary and secondary indices. Replication MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default. Load balancing[10] MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution. MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. '); INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to "non SQL" or "non relational")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. 
Such databases have existed since the late 1960s, but did not obtain the "NoSQL" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called "Not only SQL" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8] Motivations for this approach include: simplicity of design, simpler "horizontal" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as "more flexible" than relational database tables.[9] Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.) Instead, most NoSQL databases offer a concept of "eventual consistency" in which database changes are propagated to all nodes "eventually" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases "do not allow referential integrity constraints to span databases."[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. '); INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16] Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. 
After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because "SEQUEL" was a trademark of the UK-based Hawker Siddeley aircraft company.[17] In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard "Database Language SQL" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] '); INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. "Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system." "We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services." "We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs."'); INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link. Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7). Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action. According to an advisory released by phpMyAdmin, "by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc." phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms. Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases. 
Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link. "A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice." Barot explains in a blog post. However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table. "If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name," Barot says. "This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc." Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible. '); INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed. When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design. "Who care about database design? What about mockups? What about workflows?" Let me tell you about "Bob''s Luxury Goods." I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a "one-to-many" relationship between customers and addresses. That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as "in use" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES. We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was "cheaper" to remove the restriction on "flagged" addresses and allow a duplicate address to be used. Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the "typo", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it. That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. 
Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this. Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer. "Curtis," they said, "just enter a dummy customer called ''Occupant'' and attach all addresses to that." Except you couldn''t enter a customer without an order. Except you couldn''t enter an order without at least one item on it. Except you couldn''t enter an item unless it was listed in inventory. Except that reserved the "inventory" item and made it unavailable. Except, except, except ... It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a "paid" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember. Then, and only then, could I write the code to provide "generic" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them. If they had simply had a proper database design up front, they could have reused their existing system with little trouble. That''s what bad database design costs you and why I usually start with that before writing my software. Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.'); INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub. Learn more about actions As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you'); INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', ' The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves. Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update. Affected products GitHub Desktop GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. 
We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app. Atom Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch. Ensure you’re on the latest Atom release by completing any of the following: Windows: From the toolbar, click Help -> Check for Updates MacOS: From the menu bar, click Atom -> Check for Update Linux: Update manually by downloading the latest release from atom.io Git on the command line and other clients In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other. Additional notes Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9. Details of the vulnerability This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself. The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix. The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability). The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands. We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added). Please update your copy of Git soon, and happy cloning! '); INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet. 
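Tying back to the Git submodule advisory above: a quick, hedged way to confirm a command-line client is at or past one of the patched releases the post names (v2.19.1 plus the v2.14.5, v2.15.3, v2.16.5, v2.17.2 and v2.18.1 backports) is to parse `git --version`. This is only a local sketch using the standard library; it assumes `git` is on the PATH and is no substitute for actually updating.

```python
import re
import subprocess

# Minimum patched versions named in the advisory: v2.19.1 plus the backports.
PATCHED = {
    (2, 14): (2, 14, 5),
    (2, 15): (2, 15, 3),
    (2, 16): (2, 16, 5),
    (2, 17): (2, 17, 2),
    (2, 18): (2, 18, 1),
    (2, 19): (2, 19, 1),
}

out = subprocess.run(["git", "--version"], capture_output=True, text=True, check=True).stdout
match = re.search(r"(\d+)\.(\d+)\.(\d+)", out)
version = tuple(int(part) for part in match.groups()) if match else (0, 0, 0)

# Release lines newer than 2.19 already contain the fix; older unlisted lines do not.
minimum = PATCHED.get(version[:2], (2, 19, 1))
status = "looks patched" if version >= minimum else "needs updating"
print(f"git {'.'.join(map(str, version))}: {status} for CVE-2018-17456")
```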
The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux. The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet. So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others. To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs. What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.'); INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres. rbanffy on Aug 18, 2012 [-] I think this would be a mistake. This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle. All that is lost is the MySQL name and brand. PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now. Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this. Udo on Aug 18, 2012 [-] I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project! MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing. Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem. Hence, sensational and petulant "RIP $PRODUCTNAME" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own. The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. 
'); INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts? Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.'); INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code. Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files. '); INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness. This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming. What is PostgreSQL? PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge. A Brief History of PostgreSQL PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates. 1977-1985 − A project called INGRES was developed. 
Proof-of-concept for relational databases Established the company Ingres in 1980 Bought by Computer Associates in 1994 1986-1994 − POSTGRES Development of the concepts in INGRES with a focus on object orientation and the query language - Quel The code base of INGRES was not used as a basis for POSTGRES Commercialized as Illustra (bought by Informix, bought by IBM) 1994-1995 − Postgres95 Support for SQL was added in 1994 Released as Postgres95 in 1995 Re-released as PostgreSQL 6.0 in 1996 Establishment of the PostgreSQL Global Development Team Key Features of PostgreSQL PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC). PostgreSQL supports a large part of the SQL standard and offers many modern features including the following − Complex SQL queries SQL Sub-selects Foreign keys Trigger Views Transactions Multiversion concurrency control (MVCC) Streaming Replication (as of 9.0) Hot Standby (as of 9.0) You can check the official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example, by adding new − Data types Functions Operators Aggregate functions Index methods Procedural Languages Support PostgreSQL supports four standard procedural languages, which allow users to write their own code in any of these languages and have it executed by the PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.'); INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery, which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into the database administrator role at an aerospace startup. I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects, SQL Server’s cost can be prohibitive, and the release of Rob’s book added up to a good time to give PostgreSQL a try. Install Directly or not? On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to take at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part, and I didn’t find a good definitive answer on why that is or isn’t the way to go. In the end, I decided to try and run the Postgres process using Docker instead of installing directly on Windows or dealing with a full VM. Installing Docker Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled. After logging back in I then got the following message about hardware-assisted virtualization not being enabled. After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen. Open a command prompt and run the following command. docker run hello-world You should see output that starts with the following if your installation is working. Hello from Docker! 
This message shows that your installation appears to be working correctly. What about Postgres? Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container. docker create -v /var/lib/postgresql/data --name PostgresData alpine The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects. Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command. docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container. If you run the docker ps -a command it will show you all your containers. As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running. '); INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it. The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.) Step 1: Install PostgreSQL Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution): Open a terminal window. Issue the command sudo apt-get install postgresql. Type the sudo password necessary to give you admin rights and hit Enter. Allow apt to pick up any necessary dependencies. Once the installation is complete, it''s time to set this baby up. Step 2: Change the default user password Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure. Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so: Open a terminal window. Issue the command sudo passwd postgres. Type (and confirm) that password to be used for this user. 
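Once a server from either walkthrough above is listening on localhost:5432 (the Docker container or the locally installed service), a short connection test confirms it is reachable. This is only a sketch: it assumes the third-party psycopg2 package is installed, and the credentials are placeholders that must match your own POSTGRES_PASSWORD or postgres user setup.

```python
import psycopg2

# Placeholder credentials: substitute whatever you configured for your server/container.
conn = psycopg2.connect(
    host="localhost",
    port=5432,
    user="postgres",
    password="yourPassword",   # e.g. the value passed via -e POSTGRES_PASSWORD=...
    dbname="postgres",
)
with conn, conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])   # prints the server's version string
conn.close()
```

If the connection is refused, the container is usually not running or the `-p 5432:5432` port mapping was omitted.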
The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like: postgres=# All other users have to gain access to the prompt like so: psql DB_NAME where DB_NAME is the name of an existing database. '); INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL. This seems to be the journey: 1. Lack of migrations is awesome! We can iterate so quickly for MVP 2. Get users 3. Add features, still enjoying the speed of iteration 4. Get more users 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts) 6. Realise you desperately need joins, transactions and other SQL features 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back. I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? My thought is definitely yes. brandur on Aug 29, 2017 [-] > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with? I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project. The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this. The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about. Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week. martinald on Aug 29, 2017 [-] I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away. I think the thing people do find slow is a lot of ''documents within documents'' in SQL. 
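To make the "documents within documents" trade-off above concrete, here is a small, self-contained sketch (invented table names, SQLite used only so it runs anywhere): the same order stored once as a JSON blob and once as a parent table plus a child table with a foreign key. Cross-record reporting is a single query in the relational version but application-side decoding in the blob version.

```python
import json
import sqlite3

conn = sqlite3.connect(":memory:")

# Document style: the whole order, items included, dumped into one JSON column.
conn.execute("CREATE TABLE order_doc (id INTEGER PRIMARY KEY, body TEXT NOT NULL)")
conn.execute(
    "INSERT INTO order_doc (id, body) VALUES (1, ?)",
    (json.dumps({"customer": "Alice", "items": [{"sku": "A1", "qty": 2}, {"sku": "B7", "qty": 1}]}),),
)

# Relational style: a child table with a foreign key and an explicit schema.
conn.executescript("""
CREATE TABLE orders     (id INTEGER PRIMARY KEY, customer TEXT NOT NULL);
CREATE TABLE order_item (order_id INTEGER NOT NULL REFERENCES orders(id),
                         sku      TEXT NOT NULL,
                         qty      INTEGER NOT NULL);
""")
conn.execute("INSERT INTO orders (id, customer) VALUES (1, 'Alice')")
conn.executemany("INSERT INTO order_item (order_id, sku, qty) VALUES (1, ?, ?)",
                 [("A1", 2), ("B7", 1)])

# Reporting across records ("total quantity per SKU") is one query relationally...
print(conn.execute("SELECT sku, SUM(qty) FROM order_item GROUP BY sku").fetchall())

# ...while the document version decodes every blob in application code.
totals = {}
for (body,) in conn.execute("SELECT body FROM order_doc"):
    for item in json.loads(body)["items"]:
        totals[item["sku"]] = totals.get(item["sku"], 0) + item["qty"]
print(totals)
```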
It turns out this is usually a bad development pattern long term, but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done. Basically, I think with NoSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solve this mostly. I myself fell for this trap, and while it was a nightmare it actually matured me as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly. I think I assumed the "crowd" had done the tech due diligence on this stuff and it definitely wasn''t the case. '); INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'In 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan. The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global, from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto. Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was that some didn''t critically assess 10gen’s marketing message. As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but we also need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial. A large amount of "engineering" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision. Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2 10gen’s marketing strategy is an increasingly common playbook, and understanding it is useful for future developer tool decisions.'); INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts. I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.” I’d love to see a competition where the developers of each database got to use the same hardware and data, then tune the hell out of their configs, queries, and indices. Red Bull could sponsor it. I’d buy a T-shirt. kbenson 8 months ago [-] That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. 
Let people submit pull requests with new technologies or configs. You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something). If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource. Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already... 1: https://github.com/gothinkster/realworld 2: https://www.techempower.com/benchmarks/ etxm 8 months ago [-] Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :) It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU usage, RAM usage, and execution time. It would be cool to see things like disaster recovery and chaos proofing as well. '); INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community. Leveraging the community: There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed. On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release. For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system. Always quality focused: No machine learning model is perfect. 
While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert. Learn more'); INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query. Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack. Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads. maxxxxx 8 months ago [-] Agreed. Switching to another system is expensive and the benefit is pretty questionable. emsy 8 months ago [-] Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once. TremendousJudge 8 months ago [-] expand, please? maxxxxx 8 months ago [-] I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure. In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other. gopalv 8 months ago [-] > Depending on your data some databases may be better than others and that should be easy to measure. And the performance difference could be an accidental feature of the design and completely unintentional. Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated. Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs). When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural. And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two. Though how it came about isn''t really intentional. '); """), ]
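The tuning comment above mentions raising work_mem for just one session rather than switching databases. As a rough sketch of what that looks like from application code (placeholder connection details, third-party psycopg2 assumed installed):

```python
import psycopg2

# Placeholder DSN: the point is the per-session knob, not the connection details.
conn = psycopg2.connect(host="localhost", port=5432, user="postgres",
                        password="yourPassword", dbname="postgres")
with conn.cursor() as cur:
    cur.execute("SHOW work_mem;")
    print("default work_mem:", cur.fetchone()[0])

    # Raise the sort/hash memory budget for this session only; postgresql.conf
    # is untouched and other sessions keep the server default.
    cur.execute("SET work_mem = '256MB';")
    cur.execute("EXPLAIN ANALYZE SELECT * FROM generate_series(1, 1000000) AS g ORDER BY g DESC;")
    for (line,) in cur.fetchall():
        print(line)

    cur.execute("RESET work_mem;")  # back to the default for this session
conn.close()
```

The same per-session idea applies to the cost settings the comment mentions, for example SET random_page_cost for a single session, without touching the global configuration.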
normal
{ "blob_id": "8b0eed6d1f24b5dd30726ce08c97354a5d5ab69b", "index": 7597, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grafit', '0002_article')]\n operations = [migrations.RunSQL(\n \"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. 
Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. 
After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? 
What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. 
Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. 
However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. 
They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) 
The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. 
Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. 
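One optional sanity check (not part of the original commands above, and assuming the password passed via POSTGRES_PASSWORD earlier) is to point any Postgres client at localhost on port 5432 as the postgres user and run a trivial query:\n\n SELECT version();\n\n If the containerized server is reachable this simply returns the PostgreSQL version string.\n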
Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. Realise you desperately need joins, transactions and other SQL features\n\n 7. 
Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
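To make the ''documents within documents'' point concrete, here is a rough sketch (table and column names are invented for illustration): what a document store lets you dump as a single nested object becomes two tables joined by a foreign key.\n\n CREATE TABLE orders (\n id serial PRIMARY KEY,\n customer_name text NOT NULL\n );\n\n CREATE TABLE order_items (\n id serial PRIMARY KEY,\n order_id integer NOT NULL REFERENCES orders(id),\n product text NOT NULL,\n quantity integer NOT NULL\n );\n\n The schema costs a few extra minutes up front, but it is exactly what keeps the joins, constraints and reporting queries mentioned above cheap later on.\n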
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. 
For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"\n )]\n", "step-4": "import django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.utils.timezone\nimport uuid\n\n\nclass Migration(migrations.Migration):\n dependencies = [('grafit', '0002_article')]\n operations = [migrations.RunSQL(\n \"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. 
In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. 
Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. (See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. 
After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. 
\"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? 
So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. 
We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. 
We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. 
However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. 
PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. 
This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. 
The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. Realise you desperately need joins, transactions and other SQL features\n\n 7. Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. 
SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. ');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. 
Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. 
If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. 
Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"\n )]\n", "step-5": "# Generated by Django 2.1.2 on 2018-10-25 09:36\n\nimport django.contrib.auth.models\nimport django.contrib.auth.validators\nfrom django.db import migrations, models\nimport django.utils.timezone\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('grafit', '0002_article'),\n ]\n\n operations = [\n migrations.RunSQL(\"\"\"\n INSERT INTO grafit_article (id, title, text) VALUES (2, 'MongoDB', 'MongoDB is a free and open-source cross-platform document-oriented database program. Classified as a NoSQL database program, MongoDB uses JSON-like documents with schemata. MongoDB is developed by MongoDB Inc., and is published under a combination of the Server Side Public License and the Apache License.\n 10gen software company began developing MongoDB in 2007 as a component of a planned platform as a service product. In 2009, the company shifted to an open source development model, with the company offering commercial support and other services. In 2013, 10gen changed its name to MongoDB Inc.[6]\n\n On October 20, 2017, MongoDB became a publicly-traded company, listed on NASDAQ as MDB with an IPO price of $24 per share.[7] Ad hoc queries\n\n MongoDB supports field, range query, and regular expression searches.[8] Queries can return specific fields of documents and also include user-defined JavaScript functions. Queries can also be configured to return a random sample of results of a given size.\n Indexing\n\n Fields in a MongoDB document can be indexed with primary and secondary indices.\n Replication\n\n MongoDB provides high availability with replica sets.[9] A replica set consists of two or more copies of the data. Each replica set member may act in the role of primary or secondary replica at any time. All writes and reads are done on the primary replica by default. Secondary replicas maintain a copy of the data of the primary using built-in replication. 
When a primary replica fails, the replica set automatically conducts an election process to determine which secondary should become the primary. Secondaries can optionally serve read operations, but that data is only eventually consistent by default.\n Load balancing[10]\n\n MongoDB scales horizontally using sharding. The user chooses a shard key, which determines how the data in a collection will be distributed. The data is split into ranges (based on the shard key) and distributed across multiple shards. (A shard is a master with one or more slaves.). Alternatively, the shard key can be hashed to map to a shard – enabling an even data distribution.\n\n MongoDB can run over multiple servers, balancing the load or duplicating data to keep the system up and running in case of hardware failure. ');\n INSERT INTO grafit_article (id, title, text) VALUES (3, 'NoSQL', 'A NoSQL (originally referring to \"non SQL\" or \"non relational\")[1] database provides a mechanism for storage and retrieval of data that is modeled in means other than the tabular relations used in relational databases. Such databases have existed since the late 1960s, but did not obtain the \"NoSQL\" moniker until a surge of popularity in the early twenty-first century,[2] triggered by the needs of Web 2.0 companies.[3][4][5] NoSQL databases are increasingly used in big data and real-time web applications.[6] NoSQL systems are also sometimes called \"Not only SQL\" to emphasize that they may support SQL-like query languages, or sit alongside SQL database in a polyglot persistence architecture.[7][8]\n\n Motivations for this approach include: simplicity of design, simpler \"horizontal\" scaling to clusters of machines (which is a problem for relational databases),[2] and finer control over availability. The data structures used by NoSQL databases (e.g. key-value, wide column, graph, or document) are different from those used by default in relational databases, making some operations faster in NoSQL. The particular suitability of a given NoSQL database depends on the problem it must solve. Sometimes the data structures used by NoSQL databases are also viewed as \"more flexible\" than relational database tables.[9]\n\n Many NoSQL stores compromise consistency (in the sense of the CAP theorem) in favor of availability, partition tolerance, and speed. Barriers to the greater adoption of NoSQL stores include the use of low-level query languages (instead of SQL, for instance the lack of ability to perform ad-hoc joins across tables), lack of standardized interfaces, and huge previous investments in existing relational databases.[10] Most NoSQL stores lack true ACID transactions, although a few databases, such as MarkLogic, Aerospike, FairCom c-treeACE, Google Spanner (though technically a NewSQL database), Symas LMDB, and OrientDB have made them central to their designs. 
(See ACID and join support.)\n\n Instead, most NoSQL databases offer a concept of \"eventual consistency\" in which database changes are propagated to all nodes \"eventually\" (typically within milliseconds) so queries for data might not return updated data immediately or might result in reading data that is not accurate, a problem known as stale reads.[11] Additionally, some NoSQL systems may exhibit lost writes and other forms of data loss.[12] Some NoSQL systems provide concepts such as write-ahead logging to avoid data loss.[13] For distributed transaction processing across multiple databases, data consistency is an even bigger challenge that is difficult for both NoSQL and relational databases. Even current relational databases \"do not allow referential integrity constraints to span databases.\"[14] There are few systems that maintain both ACID transactions and X/Open XA standards for distributed transaction processing. ');\n INSERT INTO grafit_article (id, title, text) VALUES (4, 'SQL', 'SQL was initially developed at IBM by Donald D. Chamberlin and Raymond F. Boyce after learning about the relational model from Ted Codd[15] in the early 1970s.[16] This version, initially called SEQUEL (Structured English Query Language), was designed to manipulate and retrieve data stored in IBM''s original quasi-relational database management system, System R, which a group at IBM San Jose Research Laboratory had developed during the 1970s.[16]\n\n Chamberlin and Boyce''s first attempt of a relational database language was Square, but it was difficult to use due to subscript notation. After moving to the San Jose Research Laboratory in 1973, they began work on SEQUEL.[15] The acronym SEQUEL was later changed to SQL because \"SEQUEL\" was a trademark of the UK-based Hawker Siddeley aircraft company.[17]\n\n In the late 1970s, Relational Software, Inc. (now Oracle Corporation) saw the potential of the concepts described by Codd, Chamberlin, and Boyce, and developed their own SQL-based RDBMS with aspirations of selling it to the U.S. Navy, Central Intelligence Agency, and other U.S. government agencies. In June 1979, Relational Software, Inc. introduced the first commercially available implementation of SQL, Oracle V2 (Version2) for VAX computers. By 1986, ANSI and ISO standard groups officially adopted the standard \"Database Language SQL\" language definition. New versions of the standard were published in 1989, 1992, 1996, 1999, 2003, 2006, 2008, 2011,[15] and most recently, 2016. After testing SQL at customer test sites to determine the usefulness and practicality of the system, IBM began developing commercial products based on their System R prototype including System/38, SQL/DS, and DB2, which were commercially available in 1979, 1981, and 1983, respectively.[18] ');\n INSERT INTO grafit_article (id, title, text) VALUES (5, 'MySQL', 'Built on MySQL Enterprise Edition and powered by the Oracle Cloud, Oracle MySQL Cloud Service provides a simple, automated, integrated and enterprise ready MySQL cloud service, enabling organizations to increase business agility and reduce costs. \"Relying on the MySQL engine as the low-level storage layer has allowed us to very quickly build a robust system.\"\n \n\n \"We have successfully implemented MySQL Cluster Carrier Grade Edition for our highly mission critical XDMS application which will enable the next generation of converged services.\"\n \n\n \"We found that MySQL was the best database in terms of the price-point and functionality it offers up. 
The benefits that MySQL brings to our Brightmail product is its relaiability, robustness and very low-cost administration costs.\"');\n INSERT INTO grafit_article (id, title, text) VALUES (6, 'Critical Flaw Reported In phpMyAdmin Lets Attackers Damage Databases', 'A critical security vulnerability has been reported in phpMyAdmin—one of the most popular applications for managing the MySQL database—which could allow remote attackers to perform dangerous database operations just by tricking administrators into clicking a link.\n\n Discovered by an Indian security researcher, Ashutosh Barot, the vulnerability is a cross-site request forgery (CSRF) attack and affects phpMyAdmin versions 4.7.x (prior to 4.7.7).\n\n Cross-site request forgery vulnerability, also known as XSRF, is an attack wherein an attacker tricks an authenticated user into executing an unwanted action.\n\n According to an advisory released by phpMyAdmin, \"by deceiving a user to click on a crafted URL, it is possible to perform harmful database operations such as deleting records, dropping/truncating tables, etc.\"\n\n phpMyAdmin is a free and open source administration tool for MySQL and MariaDB and is widely used to manage the database for websites created with WordPress, Joomla, and many other content management platforms.\n\n Moreover, a lot of hosting providers use phpMyAdmin to offer their customers a convenient way to organize their databases.\n Barot has also released a video, as shown above, demonstrating how a remote attacker can make database admins unknowingly delete (DROP) an entire table from the database just by tricking them into clicking a specially crafted link.\n\n \"A feature of phpMyAdmin was using a GET request and after that POST request for Database operations such as DROP TABLE table_name; GET requests must be protected against CSRF attacks. In this case, POST requests were used which were sent through URL (for bookmarking purpose may be); it was possible for an attacker to trick a database admin into clicking a button and perform a drop table database query of the attacker’s choice.\" Barot explains in a blog post.\n\n However, performing this attack is not simple as it may sound. To prepare a CSRF attack URL, the attacker should be aware of the name of targeted database and table.\n\n \"If a user executes a query on the database by clicking insert, DROP, etc. buttons, the URL will contain database name and table name,\" Barot says. \"This vulnerability can result in the disclosure of sensitive information as the URL is stored at various places such as browser history, SIEM logs, Firewall Logs, ISP Logs, etc.\"\n\n Barot reported the vulnerability to phpMyAdmin developers, who confirmed his finding and released phpMyAdmin 4.7.7 to address this issue. So administrators are highly recommended to update their installations as soon as possible.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (25, 'Death By Database', 'The following is a true story, but with names changed.\n\n When I work with clients to build software, I take the usual steps of understanding their needs, gathering requirements, learning about their customers, and so on. At this point I have a model on paper of roughly what the software is intended to do, so they get surprised when I immediately turn to database design.\n\n \"Who care about database design? What about mockups? 
What about workflows?\"\n\n Let me tell you about \"Bob''s Luxury Goods.\" I worked for this company many years ago and they had a retail store selling ... you guessed it ... luxury goods. They''d ask all customers for a billing address and if they had a different delivery address. At the database level, they had a \"one-to-many\" relationship between customers and addresses.\n\n That was their first problem. A customer''s partner might come into Bob''s and order something and if the address was entered correctly it would be flagged as \"in use\" and we had to use a different address or deliberately enter a typo. Fortunately, addresses were case-sensitive, so many people had UPPER-CASE ADDRESSES.\n\n We should have had a many-to-many relationship between customers and addresses so we could handle the case where more than one person would share the same address, but we didn''t. Further, I was never allocated the time to fix the database because it was \"cheaper\" to remove the restriction on \"flagged\" addresses and allow a duplicate address to be used.\n\n Naturally, being a luxury goods company, we had many repeat customers and sometimes they would move and if we didn''t find the duplicate address, or the address with the \"typo\", we might update the address for one partner, but not the other. That was a headache, but it didn''t happen frequently enough for management to worry about it.\n\n That''s when the marketing department had a brilliant, inexpensive idea. You see, we periodically did mass mailings of special events to our customers. Since we had the software to do mass mailings, why not import a mailing list of all addresses in high net worth areas and mail everyone about upcoming special events? So the company went ahead and bought a database with all of these addresses, but forgot to mention to me that I was supposed to implement this.\n\n Except that every address record had the customer id embedded in it, so we couldn''t enter an address without a customer.\n\n \"Curtis,\" they said, \"just enter a dummy customer called ''Occupant'' and attach all addresses to that.\"\n\n Except you couldn''t enter a customer without an order.\n\n Except you couldn''t enter an order without at least one item on it.\n\n Except you couldn''t enter an item unless it was listed in inventory.\n\n Except that reserved the \"inventory\" item and made it unavailable.\n\n Except, except, except ...\n\n It came down to trying to create a fake customer, with a fake order, with a fake item, with a fake item category, with a \"paid\" invoice, with exceptions sprinkled throughout the codebase to handle all of these special cases and probably more that I no longer remember.\n\n Then, and only then, could I write the code to provide \"generic\" mass mailings. Management decided it was easier to hire an outside company to handle the mailing list for them.\n\n If they had simply had a proper database design up front, they could have reused their existing system with little trouble.\n\n That''s what bad database design costs you and why I usually start with that before writing my software.\n\n Note: if you''re not familiar with database design, here''s a talk I give where I make it fairly simple to understand. I mostly avoid big words.');\n INSERT INTO grafit_article (id, title, text) VALUES (33, 'GitHub Actions: built by you, run by us', 'Yesterday at GitHub Universe, we announced GitHub Actions, a new way to automate and customize your workflows. 
Configuring the apps and services that make up your development cycle takes significant time and effort. GitHub Actions applies open source principles to workflow automation, weaving together the tools you use from idea to production into one complete workflow. You can also create, share, and discover any actions your projects require, just as you would create, share, and discover code on GitHub.\n\n Learn more about actions\n\n As we prepared for Universe, we shared GitHub Actions with a group of customers, integrators, and open source maintainers to see what they could do. In just a few short weeks, talented teams and individuals alike have created hundreds of GitHub Actions. During today’s Universe keynote, we heard directly from developers, and we’re excited to share their work with you');\n INSERT INTO grafit_article (id, title, text) VALUES (34, 'Git Submodule Vulnerability Announced ', '\n\n The Git project has disclosed CVE-2018-17456, a vulnerability in Git that can cause arbitrary code to be executed when a user clones a malicious repository. Git v2.19.1 has been released with a fix, along with backports in v2.14.5, v2.15.3, v2.16.5, v2.17.2, and v2.18.1. We encourage all users to update their clients to protect themselves.\n\n Until you’ve updated, you can protect yourself by avoiding submodules from untrusted repositories. This includes commands such as git clone --recurse-submodules and git submodule update.\n Affected products\n GitHub Desktop\n\n GitHub Desktop versions 1.4.1 and older included an embedded version of Git that was affected by this vulnerability. We encourage all GitHub Desktop users to update to the newest version (1.4.2 and 1.4.3-beta0) available today in the Desktop app.\n Atom\n\n Atom included the same embedded Git and was also affected. Releases 1.31.2 and 1.32.0-beta3 include the patch.\n\n Ensure you’re on the latest Atom release by completing any of the following:\n\n Windows: From the toolbar, click Help -> Check for Updates\n MacOS: From the menu bar, click Atom -> Check for Update\n Linux: Update manually by downloading the latest release from atom.io\n\n Git on the command line and other clients\n\n In order to be protected from the vulnerability, you must update your command-line version of Git, and any other application that may include an embedded version of Git, as they are independent of each other.\n Additional notes\n\n Neither GitHub.com nor GitHub Enterprise are directly affected by the vulnerability. However, as with previously discovered vulnerabilities, GitHub.com will detect malicious repositories, and will reject pushes or API requests attempting to create them. Versions of GitHub Enterprise with this detection will ship on October 9.\n Details of the vulnerability\n\n This vulnerability is very similar to CVE-2017-1000117, as both are option-injection attacks related to submodules. In the earlier attack, a malicious repository would ship a .gitmodules file pointing one of its submodules to a remote repository with an SSH host starting with a dash (-). The ssh program—spawned by Git—would then interpret that as an option. This attack works in a similar way, except that the option-injection is against the child git clone itself.\n\n The problem was reported on September 23 by @joernchen, both to Git’s private security list, as well as to GitHub’s Bug Bounty program. Developers at GitHub worked with the Git community to develop a fix.\n\n The basic fix was clear from the report. 
However, due to to the similarity to CVE-2017-1000117, we also audited all of the .gitmodules values and implemented stricter checks as appropriate. These checks should prevent a similar vulnerability in another code path. We also implemented detection of potentially malicious submodules as part of Git’s object quality checks (which was made much easier by the infrastructure added during the last submodule-related vulnerability).\n\n The coordinated disclosure date of October 5 was selected by Git developers to allow packagers to prepare for the release. This also provided hosting sites (with custom implementations) ample time to detect and block the attack before it became public. Members of the Git community checked the JGit and libgit2 implementations. Those are not affected by the vulnerability because they clone submodules via function calls rather than separate commands.\n\n We were also able to use the time to scan all repositories on GitHub for evidence of the attack being used in the wild. We’re happy to report that no instances were found (and now, with our detection, none can be added).\n\n Please update your copy of Git soon, and happy cloning!\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (21, 'Hackers Targeting Servers Running Database Services for Mining Cryptocurrency', 'Security researchers have discovered multiple attack campaigns conducted by an established Chinese criminal group that operates worldwide, targeting database servers for mining cryptocurrencies, exfiltrating sensitive data and building a DDoS botnet.\n\n The researchers from security firm GuardiCore Labs have analyzed thousands of attacks launched in recent months and identified at least three attack variants—Hex, Hanako, and Taylor—targeting different MS SQL and MySQL servers for both Windows and Linux.\n\n The goals of all the three variants are different—Hex installs cryptocurrency miners and remote access trojans (RATs) on infected machines, Taylor installs a keylogger and a backdoor, and Hanako uses infected devices to build a DDoS botnet.\n\n So far, researchers have recorded hundreds of Hex and Hanako attacks and tens of thousands of Taylor attacks each month and found that most compromised machines are based in China, and some in Thailand, the United States, Japan and others.\n\n To gain unauthorized access to the targeted database servers, the attackers use brute force attacks and then run a series of predefined SQL commands to gain persistent access and evade audit logs.\n\n What''s interesting? To launch the attacks against database servers and serve malicious files, attackers use a network of already compromised systems, making their attack infrastructure modular and preventing takedown of their malicious activities.');\n INSERT INTO grafit_article (id, title, text) VALUES (22, 'RIP Open Source MySQL', ' This is an excellent opportunity for the Postgres community to step up an promote Postgres.\n\n \n \n rbanffy on Aug 18, 2012 [-]\n\n I think this would be a mistake.\n\n This is an excellent opportunity to demonstrate that anyone can fork the MySQL codebase and create other plug-in replacement databases with it, such as MariaDB and Drizzle.\n\n All that is lost is the MySQL name and brand.\n\n PostgreSQL users and developers must seize the opportunity to show businesses that free software cannot be killed, not even by mighty Oracle. 
They and, most notably, Microsoft, have been trying to kill it for more than a decade now.\n\n Because the anti-free-software FUD machine (fed in part by Oracle itself) is already having a wonderful time with this.\n\n \n \n Udo on Aug 18, 2012 [-]\n\n I wish I could mod this up a hundred times. PostgreSQL people themselves have been playing into the hands of corporate FUDders with their incessant and inappropriate peddling. MySQL is not your enemy, MS SQL Server is. Oracle''s software empire as a whole certainly is your enemy. Show some solidarity with a fellow open source project!\n\n MySQL and PostgreSQL represent two very different implementation philosophies, and being able to choose between them according to taste and merits is a good thing.\n\n Most of us have suspected that the MySQL project itself was going to die as it was acquired by Oracle, in the same way Open Office died when it was acquired by Oracle. This is a company where good software goes to expire, either due to a deliberate intention or gross incompetence I can''t say but I suspect it''s a mixture of both. However sad that may be for the MySQL (or OpenOffice) brand name, the code itself lives on and continues to evolve within a rich open source ecosystem.\n\n Hence, sensational and petulant \"RIP $PRODUCTNAME\" articles are unnecessary. There is no threat to existing projects based on MySQL or any other successful open source project for that matter. Not only will this stuff be free forever, it will also continue to grow and be developed on its own.\n\n The corporate assassination of open source projects will only work if we let it, it''s a purely psychological game. ');\n INSERT INTO grafit_article (id, title, text) VALUES (23, 'Free Text Sources', 'There are a few interesting things to talk about surrounding free and open textbooks. Quality is one. Usability is another. Why to write one (and/or, why not) is certainly critical. But where can you find these disruptive, open texts?\n\n Not all faculty know there are free and open texts they can use; finding free and/or open textbooks (or even knowing to look) can sometimes be a trick. I knew about one or two sources, and did a little bit more digging. Admittedly, many of the sources of free texts linked below have a technical bent. On one hand, this might be because math, computing, and the sciences are familiar with working openly and giving things away. On the other, it might be because I am a member of the computing faculty, and therefore am most familiar with resources in that space.');\n INSERT INTO grafit_article (id, title, text) VALUES (24, 'Apache Software Foundation Public Mail Archives', 'A collection of all publicly available mail archives from the Apache55 Software Foundation (ASF), taken on July 11, 2011. This collection contains all publicly available email archives from the ASF''s 80+ projects (http://mail-archives.apache.org/mod_mbox/), including mailing lists such as Apache HTTPD Server, Apache Tomcat, Apache Lucene and Solr, Apache Hadoop and many more. Generally speaking, most projects have at least three lists: user, dev and commits, but some have more, some have less. The user lists are where users of the software ask questions on usage, while the dev list usually contains discussions on the development of the project (code, releases, etc.) 
The commit lists usually consists of automated notifications sent by the various ASF version control tools, like Subversion or CVS, and contain information about changes made to the project''s source code.\n\n Both tarballs and per project sets are available in the snapshot. The tarballs are organized according to project name. Thus, a-d.tar.gz contains all ASF projects that begin with the letters a, b, c or d, such as abdera.apache.org. Files within the project are usually gzipped mbox files.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (26, 'PostgreSQL - Overview', 'PostgreSQL is a powerful, open source object-relational database system. It has more than 15 years of active development phase and a proven architecture that has earned it a strong reputation for reliability, data integrity, and correctness.\n\n This tutorial will give you a quick start with PostgreSQL and make you comfortable with PostgreSQL programming.\n What is PostgreSQL?\n\n PostgreSQL (pronounced as post-gress-Q-L) is an open source relational database management system (DBMS) developed by a worldwide team of volunteers. PostgreSQL is not controlled by any corporation or other private entity and the source code is available free of charge.\n A Brief History of PostgreSQL\n\n PostgreSQL, originally called Postgres, was created at UCB by a computer science professor named Michael Stonebraker. Stonebraker started Postgres in 1986 as a follow-up project to its predecessor, Ingres, now owned by Computer Associates.\n\n 1977-1985 − A project called INGRES was developed.\n\n Proof-of-concept for relational databases\n\n Established the company Ingres in 1980\n\n Bought by Computer Associates in 1994\n\n 1986-1994 − POSTGRES\n\n Development of the concepts in INGRES with a focus on object orientation and the query language - Quel\n\n The code base of INGRES was not used as a basis for POSTGRES\n\n Commercialized as Illustra (bought by Informix, bought by IBM)\n\n 1994-1995 − Postgres95\n\n Support for SQL was added in 1994\n\n Released as Postgres95 in 1995\n\n Re-released as PostgreSQL 6.0 in 1996\n\n Establishment of the PostgreSQL Global Development Team\n\n Key Features of PostgreSQL\n\n PostgreSQL runs on all major operating systems, including Linux, UNIX (AIX, BSD, HP-UX, SGI IRIX, Mac OS X, Solaris, Tru64), and Windows. It supports text, images, sounds, and video, and includes programming interfaces for C / C++, Java, Perl, Python, Ruby, Tcl and Open Database Connectivity (ODBC).\n\n PostgreSQL supports a large part of the SQL standard and offers many modern features including the following −\n\n Complex SQL queries\n SQL Sub-selects\n Foreign keys\n Trigger\n Views\n Transactions\n Multiversion concurrency control (MVCC)\n Streaming Replication (as of 9.0)\n Hot Standby (as of 9.0)\n\n You can check official documentation of PostgreSQL to understand the above-mentioned features. PostgreSQL can be extended by the user in many ways. For example by adding new −\n\n Data types\n Functions\n Operators\n Aggregate functions\n Index methods\n\n Procedural Languages Support\n\n PostgreSQL supports four standard procedural languages, which allows the users to write their own code in any of the languages and it can be executed by PostgreSQL database server. These procedural languages are - PL/pgSQL, PL/Tcl, PL/Perl and PL/Python. 
Besides, other non-standard procedural languages like PL/PHP, PL/V8, PL/Ruby, PL/Java, etc., are also supported.');\n INSERT INTO grafit_article (id, title, text) VALUES (27, 'Setup PostgreSQL on Windows with Docker', 'Over the weekend I finally got the chance to start reading A Curious Moon by Rob Conery which is a book on learning PostgreSQL by following the fictional Dee Yan as she is thrown into database administrator role at an aerospace startup.\n\n I have a lot of experience using Microsoft’s SQL Server, but up until now, I haven’t touched PostgreSQL. For personal projects SQL Server’s cost and be prohibitive and the release of Rob’s book added up to a good time to give PostgreSQL a try.\n Install Directly or not?\n\n On the download section of the official Postgres site, there is an option to download an installer. This is the route I was going to at first, but in Rob’s book, he suggests using a VM for Postgres installation on Windows. This kicked off a lot of searching on my part and didn’t find a good definitive answer on why that is or isn’t the way to do.\n\n In the end, I decided to try and run the Postgres process using Docker instead installing directly on Windows or dealing with a full VM.\n Installing Docker\n\n Head to this link and click the Get Docker link to download the installer. After the install is complete you will have to log out and back in. When I logged back in I got a message about Hyper-V not being enabled.\n\n After logging back in I then got the following message about hardware-assisted virtualization not being enabled.\n\n After tweaking my BIOS settings and logging back in I was greeted by the Docker welcome screen.\n\n Open a command prompt and run the following command.\n\n docker run hello-world\n\n You should output that starts with the following if your installation is working.\n\n Hello from Docker!\n This message shows that your installation appears to be working correctly.\n\n What about Postgres?\n\n Getting up and going with a container running Postgres was pretty simple and could be done with the following command which will create a container and expose the port used by Postgres so it can be accessed from the host.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d postgres\n\n The problem with this approach is if you ever need to rebuild the container for some reason, like a new version of Postgres is released, your data will be lost. Thankfully I found this blog post which shows how to use a secondary container for the data leaving the Postgres container able to be destroyed and recreated as needed. The following is the command I used to create my data container.\n\n docker create -v /var/lib/postgresql/data --name PostgresData alpine\n\n The above creates a container named PostgresData based on the Alpine image. It is important that the -v parameter matches the path that Postgres expects.\n\n Now that we have a container that will keep our data safe let’s create the actual Postgres container with the following command.\n\n docker run -p 5432:5432 --name yourContainerName -e POSTGRES_PASSWORD=yourPassword -d --volumes-from PostgresData postgres\n\n The only difference from the first example run command is the addition of –volumes-from PostgresData which tells the container to use the PostgresData container.\n\n If you run the docker ps -a command it will show you all your containers.\n\n As you can see in my example I have two containers only one of which is actually running. 
Make sure you don’t remove the data container just because it will never show as running.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (28, 'DIY: A PostgreSQL database server setup anyone can handle', 'When it comes to databases, I''m a fan of MySQL. The open source database can handle just about any load you want to throw at it, and it has lots of powerful tools that can be used to manage it.\n\n The other popular open source database is PostgreSQL, which is cross-platform and is used by numerous applications. Although PostgreSQL is often seen as being as powerful as MySQL, it doesn''t have nearly the number of available tools to make setup and management as easy as its competition. So I''ve written this handy PostgreSQL primer on how to get your database server up and running and ready to use. (Although PostgreSQL is cross-platform, I demonstrate the installation and setup on a Ubuntu 11.04 machine because it''s my platform of choice. The translation to other platforms should be simple.)\n Step 1: Install PostgreSQL\n\n Here are the installation steps on Ubuntu (this installation will also work on any Debian-based distribution):\n\n Open a terminal window.\n Issue the command sudo apt-get install postgresql.\n Type the sudo password necessary to give you admin rights and hit Enter.\n Allow apt to pick up any necessary dependencies.\n\n Once the installation is complete, it''s time to set this baby up.\n Step 2: Change the default user password\n\n Caution: If you don''t follow this step, you will not be able to add databases and administer PostgreSQL, and the database will not be secure.\n\n Here''s how to change the password for the default user. The user in question is postgres, and the password is changed like so:\n\n Open a terminal window.\n Issue the command sudo passwd postgres.\n Type (and confirm) that password to be used for this user.\n\n The postgres user will be the only user on your system that can open the PostgreSQL prompt without defining a database, which means postgres is the only user who can administer PostgreSQL. To test this, change to the postgres user with the command su - postgres and then enter the command psql. You should now be at the Postgres prompt, which looks like:\n\n postgres=#\n\n All other users have to gain access to the prompt like so:\n\n psql DB_NAME\n\n where DB_NAME is the name of an existing database.\n ');\n INSERT INTO grafit_article (id, title, text) VALUES (31, 'The Marketing Behind MongoDB', ' 100% of my friends who have used Mongo/similar NoSQL have given up and had a nasty rewrite back to pgSQL.\n\n This seems to be the journey:\n\n 1. Lack of migrations is awesome! We can iterate so quickly for MVP\n\n 2. Get users\n\n 3. Add features, still enjoying the speed of iteration\n\n 4. Get more users\n\n 5. Start building reporting features for enterprise/customer support/product metrics (ie: when the real potential success starts)\n\n 6. Realise you desperately need joins, transactions and other SQL features\n\n 7. 
Pause product dev for 1-3+ months to migrate back to SQL, or do some weird parallel development process to move it piecemeal back.\n\n I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n My thought is definitely yes.\n\n \n \n brandur on Aug 29, 2017 [-]\n\n > I think the most interesting question though is would they be able to get MVP and initial customers that set off this if they were moving (slightly) slower due to SQL and slight overhead that comes with?\n\n I''ve used Postgres and Mongo pretty extensively, and for any reasonably seasoned developer, the startup overhead of an SQL system is a myth. There may upfront cost to learning how an RDMS and SQL work in the first place, but once you''re familiar with them, they''ll be faster than Mongo on any new project.\n\n The schemaless concept of a document database seems to be the major selling factor in velocity of movement, but once you''ve got a good handle on a migration framework in the vein of ActiveRecord or other popular software, that''s negated completely. It also really doesn''t take long before schemaless starts to cause big problems for you in terms of data consistency -- it''s not just the big players that get bitten by this.\n\n The simplified query language is another one. SQL is a little bit obtuse, but it''s not that bad once you have a handle on it, and a lot of people are familiar with it. Once you add in an ORM layer, the lazy-style access of a framework like Sequel or SQLAlchemy makes the developer experience quite a bit better than any Mongo APIs that I''ve seen. Also, after you get beyond trivial usage, SQL''s flexibility so wildly outstrips Mongo''s query documents that it''s not even worth talking about.\n\n Postgres on the other hand ships with a great management CLI, a very powerful REPL (psql), and features like data types/constraints/transactions that guarantee you correctness with zero effort on your part. I can only speak for myself, but I''d take Postgres to the hackathon any day of the week.\n\n \n \n martinald on Aug 29, 2017 [-]\n\n I totally agree with you, and started writing something about how understanding a good ORM takes nearly all the headache away.\n\n I think the thing people do find slow is a lot of ''documents within documents'' in SQL. It turns out this is usually a bad development pattern long term but it is super fast being able to just add docs inside docs with no configuration. It feels very slow writing foreign keys, navigation props and schemas for this in SQL vs JSON, where you can just dump your object in and you''re done.\n\n Basically; I think with noSQL you get some very short term gain for a lot of long term pain, and you''re right, ORMs and other tooling solves this mostly.\n\n I myself fell for this trap, and while it was a nightmare it actually matured me more as a professional more than anything I''ve ever done recently. Regardless of crazy hype, I don''t think I''ll ever fall for a solution so easily without evaluating it properly.\n\n I think I assumed the \"crowd\" had done the tech due diligence on this stuff and it definitely wasn''t the case. 
');\n INSERT INTO grafit_article (id, title, text) VALUES (32, 'Countless NoSQL databases competed to be the database of choice', 'n 2013, 10gen — the company behind MongoDB — moved into a large 30,000 square foot office in Midtown Manhattan.\n\n The transfer into the former New York Times building capped off a tremendous period of growth: the database boasted 4 million downloads, the MongoDB User Groups already attracted 15,000 members, and ~10,000 people had attended a global event in 2012. Their offices were truly global from London to Sydney to Dublin and Barcelona — and a requisite west coast headquarters in Palo Alto.\n\n Despite the traction, many startups using MongoDB faced their own challenges. One part of MongoDB’s success among startups was because some didn''t critically assess 10gen’s marketing message.\n\n As engineers, we often discuss technical attacks (e.g., DDoS, Sybil attacks, security vulnerabilities), but need to spend time debating how to protect ourselves from marketing “attacks”.1 Today, developer marketing is subtle — third party blog posts, content marketing disguised as engineering lessons, biased talks, and sponsored hackathons — not clearly marked content from vendors. As such, startup engineering decisions can hinge on sources that are not impartial.\n\n A large amount of \"engineering\" content — even when written by engineers — is actually marketing, rather than thoughtful content whose aim is to help you make the best decision.\n\n Previously, we looked at the hype around NoSQL and common engineering mistakes to see how MongoDB became so successful. Now, let''s take a look into 10gen''s marketing strategy — as told by their employees.2\n\n 10gen’s marketing strategy is an increasingly common playbook and understanding it is useful for future developer tool decisions.');\n INSERT INTO grafit_article (id, title, text) VALUES (30, 'Comment Arrango', ' ArangoDB always makes for exciting benchmark posts.\n\n I could see myself there in a bowler hat with a fistful of racing chits screaming “go, Postgres, go.”\n\n I’d love to see a competition were the developers of each database got to use the same hardware and data then tune the hell out of their configs, queries, and indices.\n\n Red Bull could sponsor it. I’d buy a T-shirt.\n\n \n \n kbenson 8 months ago [-]\n\n That doesn''t sound that hard to start. Something like RealWorld[1] and the Web Framework Benchmarks[2] combined but for DB workloads. Have one dataset that includes data amenable to OLAP and OLTP, but have separate tests each consisting of OLAP queries, OLTP queries, and combined queries. Choose a low-end, mid-range and high-end set of AWS or GCE instances/configs to normalize against. Let people submit pull requests with new technologies or configs.\n\n You''d want to get some funding to run the tests (or maybe solicit Google or Amazon to see if you could get the instance time donated once a month or something.\n\n If you started small, with maybe a portion of these features, and then scaled up over time, you might actually get to the point where you had tests that emulated a power failure, or master/slave and dual master scenarios and how they handle certain common network errors (split-brain). 
That would be an amazing resource.\n\n Edit: It occurs to me I probably should have read more of the article, since this is sort of what they are doing already...\n\n 1: https://github.com/gothinkster/realworld\n\n 2: https://www.techempower.com/benchmarks/\n\n \n \n etxm 8 months ago [-]\n\n Yeah after I posted it I started thinking about what it would take and what that would actually look like... and how you’d cheat :)\n\n It would probably require a few different categories with some sort of output assertion to validate the query performed right and a means of tracking CPU, usage ram usage, and execution time.\n\n It would be cool to see things like disaster recovery and chaos proofing as well. ');\n INSERT INTO grafit_article (id, title, text) VALUES (35, 'Applying machine intelligence to GitHub security alerts ', 'Last year, we released security alerts that track security vulnerabilities in Ruby and JavaScript packages. Since then, we’ve identified more than four million of these vulnerabilities and added support for Python. In our launch post, we mentioned that all vulnerabilities with CVE IDs are included in security alerts, but sometimes there are vulnerabilities that are not disclosed in the National Vulnerability Database. Fortunately, our collection of security alerts can be supplemented with vulnerabilities detected from activity within our developer community.\n Leveraging the community\n\n There are many places a project can publicize security fixes within a new version: the CVE feed, various mailing lists and open source groups, or even within its release notes or changelog. Regardless of how projects share this information, some developers within the GitHub community will see the advisory and immediately bump their required versions of the dependency to a known safe version. If detected, we can use the information in these commits to generate security alerts for vulnerabilities which may not have been published in the CVE feed.\n\n On an average day, the dependency graph can track around 10,000 commits to dependency files for any of our supported languages. We can’t manually process this many commits. Instead, we depend on machine intelligence to sift through them and extract those that might be related to a security release.\n\n For this purpose, we created a machine learning model that scans text associated with public commits (the commit message and linked issues or pull requests) to filter out those related to possible security upgrades. With this smaller batch of commits, the model uses the diff to understand how required version ranges have changed. Then it aggregates across a specific timeframe to get a holistic view of all dependencies that a security release might affect. Finally, the model outputs a list of packages and version ranges it thinks require an alert and currently aren’t covered by any known CVE in our system.\n Always quality focused\n\n No machine learning model is perfect. While machine intelligence can sift through thousands of commits in an instant, this anomaly-detection algorithm will still generate false positives for packages where no security patch was released. Security alert quality is a focus for us, so we review all model output before the community receives an alert.\n Learn more');\n INSERT INTO grafit_article (id, title, text) VALUES (29, 'Performance Benchmark 2018', 'I''ve stopped reading database benchmarks, because they are extremely vague. Instead I spend my time optimizing my current solution/stack. 
For example Postgresql has hundreds of knobs that you can adjust for almost every scenario you can imagine. Sometimes you have a special query and increase the work_mem just for that session. Other cases you adjust the cost settings for another query/session. You can analyze your indexes and index types. And sometimes you need to rewrite parts of a big query.\n\n Learning all this takes time, you are much better off learning more about your chosen technology stack than switching to another technology stack.\n\n Though in a few rare races, you need a different technology to solve your business problem. In most cases they complement your existing solution, like Elasticsearch/Solr for full-text search or Clickhouse for OLAP workloads.\n\n \n \n maxxxxx 8 months ago [-]\n\n Agreed. Switching to another system is expensive and the benefit is pretty questionable.\n\n \n \n emsy 8 months ago [-]\n\n Unless you hit a very specific use-case/bottleneck, which I only ever witnessed once.\n\n \n \n TremendousJudge 8 months ago [-]\n\n expand, please?\n\n \n \n maxxxxx 8 months ago [-]\n\n I imagine something very specific like having a lot of inserts into a table and that being your main use case. Depending on your data some databases may be better than others and that should be easy to measure.\n\n In most real-world cases the requirements however are not very clear and often conflicting so it''s much harder to get data that shows the performance of one system over the other.\n\n \n \n gopalv 8 months ago [-]\n\n > Depending on your data some databases may be better than others and that should be easy to measure.\n\n And the performance difference could be an accidental feature of the design and completely unintentional.\n\n Postgres for instance has a native data engine, so it can store the exact row-ids for a row into an index, but this means that every update to the row needs all indexes to be updated.\n\n Mysql has many data engines (InnoDB and MyISAM to start with), to the row-id is somewhat opaque, so the index stores the primary key which can be pushed to the data engine scans and then have it lookup a row-id internally. This needs an index to be touched for the columns you modify explicitly or if the primary key is updated (which is a usual no-no due to UNIQUE lookup costs).\n\n When you have a single wide table with a huge number of indexes, where you update a lot of dimensions frequently, the performance difference between these two solutions is architectural.\n\n And if you lookup along an index with few updates, but long running open txns, that is also materially different - one lookup versus two.\n\n Though how it came about isn''t really intentional. ');\n \"\"\"),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
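# A minimal sketch, not drawn verbatim from the record above: the "step-5"
# migration in that record seeds grafit_article rows through a single
# migrations.RunSQL call. The app label, dependency and table name below are
# taken from the record itself; the article id 99 and its title/text are
# hypothetical placeholders. Pairing the forward INSERT with a reverse DELETE
# keeps the data migration reversible.
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('grafit', '0002_article'),
    ]

    operations = [
        migrations.RunSQL(
            sql="""
            INSERT INTO grafit_article (id, title, text)
            VALUES (99, 'Example title', 'Example body text');
            """,
            reverse_sql="""
            DELETE FROM grafit_article WHERE id = 99;
            """,
        ),
    ]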
INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'
INPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'

puSTARTUP_TTBAR = '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root'

relval = {
    'step1': {
        'step': 'GEN-HLT',
        'timesize': (100, ['MinBias','TTbar']),
        'igprof': (50, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        #??? 'pileupInput': '',
        'cmsdriver': '--eventcontent RAWSIM --conditions auto:mc'
    },
    'step2': {
        'step': 'RAW2DIGI-RECO',
        'timesize': (8000, ['MinBias','TTbar']),
        'igprof': (200, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        'pileupInput': puSTARTUP_TTBAR,
        'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],
        'cmsdriver': '--eventcontent RECOSIM --conditions auto:startup'
    },
    'GENSIMDIGI': {
        'step': 'GEN-SIM,DIGI',
        'timesize': (100, ['MinBias','SingleElectronE1000','SingleMuMinusPt10','SinglePiMinusE1000','TTbar']),
        'igprof': (5, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        #??? 'pileupInput': '',
        'fileInput': '',
        'cmsdriver': '--eventcontent FEVTDEBUG --conditions auto:mc'
    },
    'HLT': {
        'step': 'HLT',
        'timesize': (8000, ['MinBias','TTbar']),
        'igprof': (500, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        'pileupInput': puSTARTUP_TTBAR,
        'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],
        'cmsdriver': '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW'
    },
    'FASTSIM': {
        'step': 'GEN-FASTSIM',
        'timesize': (8000, ['MinBias','TTbar']),
        'igprof': (500, ['TTbar']),
        'memcheck': (5, ['TTbar']),
        'pileup': ['TTbar'],
        'cmsdriver': '--eventcontent RECOSIM --conditions auto:mc'
    }
}
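# A minimal sketch, assuming a hypothetical driver script: it shows one way the
# relval dict above could be expanded into cmsDriver.py command lines for the
# TimeSize (event count, candle list) entries. The real performance suite maps
# candle names to generator fragments before calling cmsDriver.py, so the
# command format here is illustrative only, not the suite's actual invocation.
def timesize_commands(relval_config):
    """Build one cmsDriver.py command per (step, candle) pair."""
    commands = []
    for step_name, cfg in relval_config.items():
        n_events, candles = cfg['timesize']
        for candle in candles:
            commands.append(
                'cmsDriver.py %s -s %s -n %d %s'
                % (candle, cfg['step'], n_events, cfg['cmsdriver'])
            )
    return commands


# Example usage: print the commands that would profile only the HLT step.
for cmd in timesize_commands({'HLT': relval['HLT']}):
    print(cmd)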
normal
{ "blob_id": "78c9f92349ba834bc64dc84f884638c4316a9ea4", "index": 352, "step-1": "<mask token>\n", "step-2": "INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'\nINPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'\npuSTARTUP_TTBAR = (\n '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root')\nrelval = {'step1': {'step': 'GEN-HLT', 'timesize': (100, ['MinBias',\n 'TTbar']), 'igprof': (50, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'cmsdriver':\n '--eventcontent RAWSIM --conditions auto:mc'}, 'step2': {'step':\n 'RAW2DIGI-RECO', 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (\n 200, ['TTbar']), 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'],\n 'pileupInput': puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS,\n INPUT_TTBAR], 'cmsdriver':\n '--eventcontent RECOSIM --conditions auto:startup'}, 'GENSIMDIGI': {\n 'step': 'GEN-SIM,DIGI', 'timesize': (100, ['MinBias',\n 'SingleElectronE1000', 'SingleMuMinusPt10', 'SinglePiMinusE1000',\n 'TTbar']), 'igprof': (5, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'fileInput': '', 'cmsdriver':\n '--eventcontent FEVTDEBUG --conditions auto:mc'}, 'HLT': {'step': 'HLT',\n 'timesize': (8000, ['MinBias', 'TTbar']), 'igprof': (500, ['TTbar']),\n 'memcheck': (5, ['TTbar']), 'pileup': ['TTbar'], 'pileupInput':\n puSTARTUP_TTBAR, 'fileInput': [INPUT_MINBIAS, INPUT_TTBAR], 'cmsdriver':\n '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW'\n }, 'FASTSIM': {'step': 'GEN-FASTSIM', 'timesize': (8000, ['MinBias',\n 'TTbar']), 'igprof': (500, ['TTbar']), 'memcheck': (5, ['TTbar']),\n 'pileup': ['TTbar'], 'cmsdriver':\n '--eventcontent RECOSIM --conditions auto:mc'}}\n", "step-3": "INPUT_MINBIAS = '/build/RAWReference/MinBias_RAW_320_STARTUP.root'\nINPUT_TTBAR = '/build/RAWReference/TTbar_RAW_320_STARTUP.root'\n\npuSTARTUP_TTBAR = '/build/RAWReference/TTbar_Tauola_PileUp_RAW_320_STARTUP.root'\n\nrelval = {\n 'step1': {\t'step': 'GEN-HLT',\n\t\t\t'timesize': (100, ['MinBias','TTbar']),\n\t\t\t'igprof': (50, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n#???\t\t\t'pileupInput': '',\n\t\t\t'cmsdriver': '--eventcontent RAWSIM --conditions auto:mc' },\n\n\t'step2': {\t'step': 'RAW2DIGI-RECO',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t \t\t'igprof': (200, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'pileupInput': puSTARTUP_TTBAR,\n\t\t\t'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],\n\t\t\t'cmsdriver': '--eventcontent RECOSIM --conditions auto:startup' },\n\n\t'GENSIMDIGI': {\t'step': 'GEN-SIM,DIGI',\n\t\t\t'timesize': (100, ['MinBias','SingleElectronE1000','SingleMuMinusPt10','SinglePiMinusE1000','TTbar']),\n\t\t\t'igprof': (5, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n#???\t\t\t'pileupInput': '',\n\t\t\t'fileInput': '',\n\t\t\t'cmsdriver': '--eventcontent FEVTDEBUG --conditions auto:mc' },\n\n\t'HLT': { 'step': 'HLT',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t\t\t'igprof': (500, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'pileupInput': puSTARTUP_TTBAR,\n\t\t\t'fileInput': [INPUT_MINBIAS,INPUT_TTBAR],\n\t\t\t'cmsdriver': '--eventcontent RAWSIM --conditions auto:startup --processName HLTFROMRAW' },\n\n\t'FASTSIM': {\t'step': 'GEN-FASTSIM',\n\t\t\t'timesize': (8000, ['MinBias','TTbar']),\n\t\t\t'igprof': (500, ['TTbar']),\n\t\t\t'memcheck': (5, ['TTbar']),\n\t\t\t'pileup': ['TTbar'],\n\t\t\t'cmsdriver': '--eventcontent RECOSIM 
--conditions auto:mc' }\n}\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
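The relval mapping above is a plain dictionary keyed by workflow name, holding per-step event counts, candidate lists, and cmsDriver option strings. A short sketch of reading it, assuming the dictionary is importable as relval; the reporting loop is illustrative and not part of the original suite:

# Illustrative only: summarize what each configured step would run.
for name, cfg in relval.items():
    events, candidates = cfg['timesize']
    print(f"{name}: step={cfg['step']}, timesize={events} events over {candidates}")
    print(f"  cmsDriver options: {cfg['cmsdriver']}")
    if cfg.get('fileInput'):
        print(f"  input files: {cfg['fileInput']}")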
import requests
from bs4 import BeautifulSoup
import time

print("Put some unfamiliar skills")
unfamilar_skills = input(">")
print(f"Filtering result for {unfamilar_skills}...\n")


def find_jobs():
    html_text = requests.get('https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation=').text
    soup = BeautifulSoup(html_text, 'lxml')
    jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')

    for job in jobs:
        posted = job.find('span', class_='sim-posted').span.text
        if "few" in posted:
            company_name = job.find('h3', class_='joblist-comp-name').text.replace(" ", '')
            skills = job.find('span', class_='srp-skills').text.replace(' ', '')
            more_info = job.header.a['href']
            if unfamilar_skills not in skills:
                print(f'Company Name: {company_name.strip()}')
                print(f'Skills: {skills.strip()}')
                print(f"More Info: {more_info}")
                print("")


if __name__ == '__main__':
    find_jobs()
    while True:
        find_jobs()
        filter_time = 10
        print(f"Waiting for {filter_time} minute")
        time.sleep(filter_time * 60)
normal
{ "blob_id": "92b71c67130cd37b2143fbd9ad71fe9a18b3f7e8", "index": 2622, "step-1": "<mask token>\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\n<mask token>\n", "step-2": "<mask token>\nprint('Put some unfamiliar skills')\n<mask token>\nprint(f'Filtering result for {unfamilar_skills}...\\n')\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\nif __name__ == '__main__':\n find_jobs()\n while True:\n find_jobs()\n filter_time = 10\n print(f'Waiting for {filter_time} minute')\n time.sleep(filter_time * 60)\n", "step-3": "<mask token>\nprint('Put some unfamiliar skills')\nunfamilar_skills = input('>')\nprint(f'Filtering result for {unfamilar_skills}...\\n')\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\nif __name__ == '__main__':\n find_jobs()\n while True:\n find_jobs()\n filter_time = 10\n print(f'Waiting for {filter_time} minute')\n time.sleep(filter_time * 60)\n", "step-4": "import requests\nfrom bs4 import BeautifulSoup\nimport time\nprint('Put some unfamiliar skills')\nunfamilar_skills = input('>')\nprint(f'Filtering result for {unfamilar_skills}...\\n')\n\n\ndef find_jobs():\n html_text = requests.get(\n 'https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation='\n ).text\n soup = BeautifulSoup(html_text, 'lxml')\n jobs = soup.find_all('li', class_='clearfix job-bx wht-shd-bx')\n for job in jobs:\n posted = 
job.find('span', class_='sim-posted').span.text\n if 'few' in posted:\n company_name = job.find('h3', class_='joblist-comp-name'\n ).text.replace(' ', '')\n skills = job.find('span', class_='srp-skills').text.replace(' ', ''\n )\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f'More Info: {more_info}')\n print('')\n\n\nif __name__ == '__main__':\n find_jobs()\n while True:\n find_jobs()\n filter_time = 10\n print(f'Waiting for {filter_time} minute')\n time.sleep(filter_time * 60)\n", "step-5": "import requests\nfrom bs4 import BeautifulSoup\nimport time\nprint(\"Put some unfamiliar skills\")\nunfamilar_skills = input(\">\")\nprint(f\"Filtering result for {unfamilar_skills}...\\n\")\n\ndef find_jobs():\n html_text = requests.get('https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation=').text\n soup = BeautifulSoup(html_text,'lxml')\n jobs = soup.find_all('li',class_='clearfix job-bx wht-shd-bx')\n\n for job in jobs:\n posted = job.find('span',class_='sim-posted').span.text\n if(\"few\" in posted):\n company_name = job.find('h3',class_='joblist-comp-name').text.replace(\" \",'')\n skills = job.find('span',class_='srp-skills').text.replace(' ','')\n more_info = job.header.a['href']\n if unfamilar_skills not in skills:\n print(f'Company Name: {company_name.strip()}')\n print(f'Skills: {skills.strip()}')\n print(f\"More Info: {more_info}\")\n print(\"\")\n\n\nif __name__ == '__main__':\n find_jobs() \n while True:\n find_jobs()\n filter_time = 10\n print(f\"Waiting for {filter_time} minute\")\n time.sleep(filter_time*60)\n\n \n\n\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django import forms
from acl.models import Alert


class CreateAlertForm(forms.ModelForm):
    class Meta:
        model = Alert
        exclude = ['role', 'age_analysis', 'Date_Uploaded', 'alias_name',
                   'CAMT_Reveiewer', 'Date_Regularised', 'alert_message', 'Count2']
normal
{ "blob_id": "bfcf6e241881c4f668f926e087ab0f7dcad61dee", "index": 5260, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass CreateAlertForm(forms.ModelForm):\n\n\n class Meta:\n model = Alert\n exclude = ['role', 'age_analysis', 'Date_Uploaded', 'alias_name',\n 'CAMT_Reveiewer', 'Date_Regularised', 'alert_message', 'Count2']\n", "step-3": "from django import forms\nfrom acl.models import Alert\n\n\nclass CreateAlertForm(forms.ModelForm):\n\n\n class Meta:\n model = Alert\n exclude = ['role', 'age_analysis', 'Date_Uploaded', 'alias_name',\n 'CAMT_Reveiewer', 'Date_Regularised', 'alert_message', 'Count2']\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
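CreateAlertForm above is a standard ModelForm that simply excludes a list of Alert fields. A minimal usage sketch in a view, assuming the form and model from that row; the view function, import path, template, and redirect target are hypothetical:

# Hypothetical view showing typical use of the form above.
from django.shortcuts import render, redirect
from acl.forms import CreateAlertForm  # module path assumed

def create_alert(request):
    if request.method == 'POST':
        form = CreateAlertForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('alert-list')  # hypothetical URL name
    else:
        form = CreateAlertForm()
    return render(request, 'acl/create_alert.html', {'form': form})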
# These are instance types to make available to all AWS EC2 systems, except the . # PostgreSQL server, until the auto tuning playbook can tune for systems that # small. AWSGlobalInstanceChoices = [ 't2.nano', 't2.micro', 't3.nano', 't3.micro', 't3a.nano', 't3a.micro', ] class SpecValidator: def __init__(self, type=None, default=None, choices=[], min=None, max=None): self.type = type self.default = default self.choices = choices self.min = min self.max = max DefaultAWSSpec = { 'available_os': { 'CentOS7': { 'image': SpecValidator( type='string', default="CentOS Linux 7 x86_64 HVM EBS*" ), 'ssh_user': SpecValidator( type='choice', choices=['centos'], default='centos' ) }, 'RedHat7': { 'image': SpecValidator( type='string', default="RHEL-7.8-x86_64*" ), 'ssh_user': SpecValidator( type='choice', choices=['ec2-user'], default='ec2-user' ) }, 'RedHat8': { 'image': SpecValidator( type='string', default="RHEL-8.2-x86_64*" ), 'ssh_user': SpecValidator( type='choice', choices=['ec2-user'], default='ec2-user' ) }, 'RockyLinux8': { 'image': SpecValidator( type='string', default="Rocky-8-ec2-8.5-20211114.2.x86_64" ), 'ssh_user': SpecValidator( type='choice', choices=['rocky'], default='rocky' ) } }, 'dbt2': SpecValidator( type='choice', choices=[True, False], default=False ), 'dbt2_client': { 'count': SpecValidator( type='integer', min=0, max=64, default=0 ), 'instance_type': SpecValidator( type='choice', choices=[ 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge' ] + AWSGlobalInstanceChoices, default='m5n.xlarge' ), 'volume': { 'type': SpecValidator( type='choice', choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=50 ), 'iops': SpecValidator( type='integer', min=100, max=64000, default=250 ) }, }, 'dbt2_driver': { 'count': SpecValidator( type='integer', min=0, max=64, default=0 ), 'instance_type': SpecValidator( type='choice', choices=[ 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge' ] + AWSGlobalInstanceChoices, default='m5n.xlarge' ), 'volume': { 'type': SpecValidator( type='choice', choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=50 ), 'iops': SpecValidator( type='integer', min=100, max=64000, default=250 ) }, }, 'hammerdb_server': { 'instance_type': SpecValidator( type='choice', choices=[ 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge' ] + AWSGlobalInstanceChoices, default='m5n.xlarge' ), 'volume': { 'type': SpecValidator( type='choice', choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=50 ), 'iops': SpecValidator( type='integer', min=100, max=64000, default=250 ) }, }, 'pem_server': { 'instance_type': SpecValidator( type='choice', choices=[ 'c5.large', 'c5.xlarge', 'c5.2xlarge', 'c5.4xlarge', 'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge', 'c5.24xlarge', 'c5.metal' ] + AWSGlobalInstanceChoices, default='c5.xlarge' ), 'volume': { 'type': SpecValidator( type='choice', choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=100 ), 'iops': SpecValidator( type='integer', min=100, max=64000, default=250 ) } } } DefaultAzureSpec = { 'available_os': { 'CentOS7': { 'publisher': SpecValidator(type='string', default="OpenLogic"), 'offer': SpecValidator(type='string', default="CentOS"), 'sku': SpecValidator(type='string', default="7.7"), 'ssh_user': SpecValidator(type='string', default='edbadm') }, 
'RedHat7': { 'publisher': SpecValidator(type='string', default="RedHat"), 'offer': SpecValidator(type='string', default="RHEL"), 'sku': SpecValidator(type='string', default="7.8"), 'ssh_user': SpecValidator(type='string', default='edbadm') }, 'RedHat8': { 'publisher': SpecValidator(type='string', default="RedHat"), 'offer': SpecValidator(type='string', default="RHEL"), 'sku': SpecValidator(type='string', default="8.2"), 'ssh_user': SpecValidator(type='string', default='edbadm') }, 'RockyLinux8': { 'publisher': SpecValidator(type='string', default="Perforce"), 'offer': SpecValidator(type='string', default="rockylinux8"), 'sku': SpecValidator(type='string', default="8"), 'ssh_user': SpecValidator(type='string', default='rocky') } }, 'dbt2': SpecValidator( type='choice', choices=[True, False], default=False ), 'dbt2_driver': { 'count': SpecValidator( type='integer', min=0, max=64, default=0 ), 'instance_type': SpecValidator( type='choice', choices=[ 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2' ], default='Standard_A2_v2' ), 'volume': { 'storage_account_type': SpecValidator( type='choice', choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS' ) } }, 'dbt2_client': { 'count': SpecValidator( type='integer', min=0, max=64, default=0 ), 'instance_type': SpecValidator( type='choice', choices=[ 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2' ], default='Standard_A2_v2' ), 'volume': { 'storage_account_type': SpecValidator( type='choice', choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS' ) } }, 'pem_server': { 'instance_type': SpecValidator( type='choice', choices=[ 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2' ], default='Standard_A2_v2' ), 'volume': { 'storage_account_type': SpecValidator( type='choice', choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS' ) } }, 'hammerdb_server': { 'instance_type': SpecValidator( type='choice', choices=[ 'Standard_D4ds_v4', 'Standard_D8ds_v4' ], default='Standard_D4ds_v4' ), 'volume': { 'storage_account_type': SpecValidator( type='choice', choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS' ) }, 'additional_volumes': { 'count': SpecValidator( type='integer', min=0, max=5, default=2 ), 'storage_account_type': SpecValidator( type='choice', choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS', 'UltraSSD_LRS'], default='StandardSSD_LRS' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=100 ) } } } DefaultGcloudSpec = { 'available_os': { 'CentOS7': { 'image': SpecValidator(type='string', default="centos-7"), 'ssh_user': SpecValidator(type='string', default='edbadm') }, 'RedHat7': { 'image': SpecValidator(type='string', default="rhel-7"), 'ssh_user': SpecValidator(type='string', default='edbadm') }, 'RedHat8': { 'image': SpecValidator(type='string', default="rhel-8"), 'ssh_user': SpecValidator(type='string', default='edbadm') }, 'RockyLinux8': { 'image': SpecValidator(type='string', default="rocky-linux-8"), 'ssh_user': SpecValidator(type='string', default='rocky') } }, 'dbt2': SpecValidator( type='choice', choices=[True, False], default=False ), 'dbt2_client': { 'count': SpecValidator( type='integer', min=0, max=64, 
default=0 ), 'instance_type': SpecValidator( type='choice', choices=[ 'c2-standard-4', 'c2-standard-8', 'c2-standard-16' ], default='c2-standard-4' ), 'volume': { 'type': SpecValidator( type='choice', choices=['pd-standard', 'pd-ssd'], default='pd-standard' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=50 ) } }, 'dbt2_driver': { 'count': SpecValidator( type='integer', min=0, max=64, default=0 ), 'instance_type': SpecValidator( type='choice', choices=[ 'c2-standard-4', 'c2-standard-8', 'c2-standard-16' ], default='c2-standard-4' ), 'volume': { 'type': SpecValidator( type='choice', choices=['pd-standard', 'pd-ssd'], default='pd-standard' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=50 ) } }, 'hammerdb_server': { 'instance_type': SpecValidator( type='choice', choices=[ 'c2-standard-4', 'c2-standard-8', 'c2-standard-16' ], default='c2-standard-4' ), 'volume': { 'type': SpecValidator( type='choice', choices=['pd-standard', 'pd-ssd'], default='pd-standard' ), 'size': SpecValidator( type='integer', min=10, max=16000, default=50 ) }, 'additional_volumes': { 'count': SpecValidator( type='integer', min=0, max=5, default=2 ), 'type': SpecValidator( type='choice', choices=['pd-standard', 'pd-ssd'], default='pd-ssd' ), 'size': SpecValidator( type='integer', min=10, max=65536, default=100 ) } }, 'pem_server': { 'instance_type': SpecValidator( type='choice', choices=[ 'e2-standard-2', 'e2-standard-4', 'e2-standard-8', 'e2-standard-16', 'e2-standard-32', 'e2-highmem-2', 'e2-highmem-4', 'e2-highmem-8', 'e2-highmem-16' ], default='e2-standard-4' ), 'volume': { 'type': SpecValidator( type='choice', choices=['pd-standard', 'pd-ssd'], default='pd-standard' ), 'size': SpecValidator( type='integer', min=10, max=65536, default=100 ) } } }
normal
{ "blob_id": "4db93bdab2d73e7226dcad61827f5faea8513767", "index": 9888, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass SpecValidator:\n <mask token>\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass SpecValidator:\n\n def __init__(self, type=None, default=None, choices=[], min=None, max=None\n ):\n self.type = type\n self.default = default\n self.choices = choices\n self.min = min\n self.max = max\n\n\n<mask token>\n", "step-4": "AWSGlobalInstanceChoices = ['t2.nano', 't2.micro', 't3.nano', 't3.micro',\n 't3a.nano', 't3a.micro']\n\n\nclass SpecValidator:\n\n def __init__(self, type=None, default=None, choices=[], min=None, max=None\n ):\n self.type = type\n self.default = default\n self.choices = choices\n self.min = min\n self.max = max\n\n\nDefaultAWSSpec = {'available_os': {'CentOS7': {'image': SpecValidator(type=\n 'string', default='CentOS Linux 7 x86_64 HVM EBS*'), 'ssh_user':\n SpecValidator(type='choice', choices=['centos'], default='centos')},\n 'RedHat7': {'image': SpecValidator(type='string', default=\n 'RHEL-7.8-x86_64*'), 'ssh_user': SpecValidator(type='choice', choices=[\n 'ec2-user'], default='ec2-user')}, 'RedHat8': {'image': SpecValidator(\n type='string', default='RHEL-8.2-x86_64*'), 'ssh_user': SpecValidator(\n type='choice', choices=['ec2-user'], default='ec2-user')},\n 'RockyLinux8': {'image': SpecValidator(type='string', default=\n 'Rocky-8-ec2-8.5-20211114.2.x86_64'), 'ssh_user': SpecValidator(type=\n 'choice', choices=['rocky'], default='rocky')}}, 'dbt2': SpecValidator(\n type='choice', choices=[True, False], default=False), 'dbt2_client': {\n 'count': SpecValidator(type='integer', min=0, max=64, default=0),\n 'instance_type': SpecValidator(type='choice', choices=['m5n.xlarge',\n 'm5n.2xlarge', 'm5n.4xlarge'] + AWSGlobalInstanceChoices, default=\n 'm5n.xlarge'), 'volume': {'type': SpecValidator(type='choice', choices=\n ['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'), 'size':\n SpecValidator(type='integer', min=10, max=16000, default=50), 'iops':\n SpecValidator(type='integer', min=100, max=64000, default=250)}},\n 'dbt2_driver': {'count': SpecValidator(type='integer', min=0, max=64,\n default=0), 'instance_type': SpecValidator(type='choice', choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'), 'volume': {'type': SpecValidator(type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'),\n 'size': SpecValidator(type='integer', min=10, max=16000, default=50),\n 'iops': SpecValidator(type='integer', min=100, max=64000, default=250)}\n }, 'hammerdb_server': {'instance_type': SpecValidator(type='choice',\n choices=['m5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'] +\n AWSGlobalInstanceChoices, default='m5n.xlarge'), 'volume': {'type':\n SpecValidator(type='choice', choices=['io1', 'io2', 'gp2', 'gp3', 'st1',\n 'sc1'], default='gp2'), 'size': SpecValidator(type='integer', min=10,\n max=16000, default=50), 'iops': SpecValidator(type='integer', min=100,\n max=64000, default=250)}}, 'pem_server': {'instance_type':\n SpecValidator(type='choice', choices=['c5.large', 'c5.xlarge',\n 'c5.2xlarge', 'c5.4xlarge', 'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge',\n 'c5.24xlarge', 'c5.metal'] + AWSGlobalInstanceChoices, default=\n 'c5.xlarge'), 'volume': {'type': SpecValidator(type='choice', choices=[\n 'io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'], default='gp2'), 'size':\n SpecValidator(type='integer', min=10, max=16000, default=100), 'iops':\n SpecValidator(type='integer', min=100, 
max=64000, default=250)}}}\nDefaultAzureSpec = {'available_os': {'CentOS7': {'publisher': SpecValidator\n (type='string', default='OpenLogic'), 'offer': SpecValidator(type=\n 'string', default='CentOS'), 'sku': SpecValidator(type='string',\n default='7.7'), 'ssh_user': SpecValidator(type='string', default=\n 'edbadm')}, 'RedHat7': {'publisher': SpecValidator(type='string',\n default='RedHat'), 'offer': SpecValidator(type='string', default='RHEL'\n ), 'sku': SpecValidator(type='string', default='7.8'), 'ssh_user':\n SpecValidator(type='string', default='edbadm')}, 'RedHat8': {\n 'publisher': SpecValidator(type='string', default='RedHat'), 'offer':\n SpecValidator(type='string', default='RHEL'), 'sku': SpecValidator(type\n ='string', default='8.2'), 'ssh_user': SpecValidator(type='string',\n default='edbadm')}, 'RockyLinux8': {'publisher': SpecValidator(type=\n 'string', default='Perforce'), 'offer': SpecValidator(type='string',\n default='rockylinux8'), 'sku': SpecValidator(type='string', default='8'\n ), 'ssh_user': SpecValidator(type='string', default='rocky')}}, 'dbt2':\n SpecValidator(type='choice', choices=[True, False], default=False),\n 'dbt2_driver': {'count': SpecValidator(type='integer', min=0, max=64,\n default=0), 'instance_type': SpecValidator(type='choice', choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',\n 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=\n 'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},\n 'dbt2_client': {'count': SpecValidator(type='integer', min=0, max=64,\n default=0), 'instance_type': SpecValidator(type='choice', choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',\n 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=\n 'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},\n 'pem_server': {'instance_type': SpecValidator(type='choice', choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2', 'Standard_A8_v2',\n 'Standard_A2m_v2', 'Standard_A4m_v2', 'Standard_A8m_v2'], default=\n 'Standard_A2_v2'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')}},\n 'hammerdb_server': {'instance_type': SpecValidator(type='choice',\n choices=['Standard_D4ds_v4', 'Standard_D8ds_v4'], default=\n 'Standard_D4ds_v4'), 'volume': {'storage_account_type': SpecValidator(\n type='choice', choices=['Premium_LRS', 'StandardSSD_LRS',\n 'Standard_LRS', 'UltraSSD_LRS'], default='Standard_LRS')},\n 'additional_volumes': {'count': SpecValidator(type='integer', min=0,\n max=5, default=2), 'storage_account_type': SpecValidator(type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'], default='StandardSSD_LRS'), 'size': SpecValidator(type\n ='integer', min=10, max=16000, default=100)}}}\nDefaultGcloudSpec = {'available_os': {'CentOS7': {'image': SpecValidator(\n type='string', default='centos-7'), 'ssh_user': SpecValidator(type=\n 'string', default='edbadm')}, 'RedHat7': {'image': SpecValidator(type=\n 'string', default='rhel-7'), 'ssh_user': SpecValidator(type='string',\n default='edbadm')}, 'RedHat8': {'image': SpecValidator(type='string',\n 
default='rhel-8'), 'ssh_user': SpecValidator(type='string', default=\n 'edbadm')}, 'RockyLinux8': {'image': SpecValidator(type='string',\n default='rocky-linux-8'), 'ssh_user': SpecValidator(type='string',\n default='rocky')}}, 'dbt2': SpecValidator(type='choice', choices=[True,\n False], default=False), 'dbt2_client': {'count': SpecValidator(type=\n 'integer', min=0, max=64, default=0), 'instance_type': SpecValidator(\n type='choice', choices=['c2-standard-4', 'c2-standard-8',\n 'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 16000, default=50)}}, 'dbt2_driver': {'count': SpecValidator(type=\n 'integer', min=0, max=64, default=0), 'instance_type': SpecValidator(\n type='choice', choices=['c2-standard-4', 'c2-standard-8',\n 'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 16000, default=50)}}, 'hammerdb_server': {'instance_type':\n SpecValidator(type='choice', choices=['c2-standard-4', 'c2-standard-8',\n 'c2-standard-16'], default='c2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 16000, default=50)}, 'additional_volumes': {'count': SpecValidator(type\n ='integer', min=0, max=5, default=2), 'type': SpecValidator(type=\n 'choice', choices=['pd-standard', 'pd-ssd'], default='pd-ssd'), 'size':\n SpecValidator(type='integer', min=10, max=65536, default=100)}},\n 'pem_server': {'instance_type': SpecValidator(type='choice', choices=[\n 'e2-standard-2', 'e2-standard-4', 'e2-standard-8', 'e2-standard-16',\n 'e2-standard-32', 'e2-highmem-2', 'e2-highmem-4', 'e2-highmem-8',\n 'e2-highmem-16'], default='e2-standard-4'), 'volume': {'type':\n SpecValidator(type='choice', choices=['pd-standard', 'pd-ssd'], default\n ='pd-standard'), 'size': SpecValidator(type='integer', min=10, max=\n 65536, default=100)}}}\n", "step-5": "# These are instance types to make available to all AWS EC2 systems, except the .\n# PostgreSQL server, until the auto tuning playbook can tune for systems that\n# small.\nAWSGlobalInstanceChoices = [\n 't2.nano', 't2.micro',\n 't3.nano', 't3.micro',\n 't3a.nano', 't3a.micro',\n]\n\n\nclass SpecValidator:\n def __init__(self, type=None, default=None, choices=[], min=None,\n max=None):\n self.type = type\n self.default = default\n self.choices = choices\n self.min = min\n self.max = max\n\n\nDefaultAWSSpec = {\n 'available_os': {\n 'CentOS7': {\n 'image': SpecValidator(\n type='string',\n default=\"CentOS Linux 7 x86_64 HVM EBS*\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['centos'],\n default='centos'\n )\n },\n 'RedHat7': {\n 'image': SpecValidator(\n type='string',\n default=\"RHEL-7.8-x86_64*\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['ec2-user'],\n default='ec2-user'\n )\n },\n 'RedHat8': {\n 'image': SpecValidator(\n type='string',\n default=\"RHEL-8.2-x86_64*\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['ec2-user'],\n default='ec2-user'\n )\n },\n 'RockyLinux8': {\n 'image': SpecValidator(\n type='string',\n default=\"Rocky-8-ec2-8.5-20211114.2.x86_64\"\n ),\n 'ssh_user': SpecValidator(\n type='choice',\n choices=['rocky'],\n default='rocky'\n )\n }\n\n },\n 'dbt2': 
SpecValidator(\n type='choice',\n choices=[True, False],\n default=False\n ),\n 'dbt2_client': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'\n ] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n },\n },\n 'dbt2_driver': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'\n ] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n },\n },\n 'hammerdb_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'm5n.xlarge', 'm5n.2xlarge', 'm5n.4xlarge'\n ] + AWSGlobalInstanceChoices,\n default='m5n.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n },\n },\n 'pem_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c5.large', 'c5.xlarge', 'c5.2xlarge', 'c5.4xlarge',\n 'c5.9xlarge', 'c5.12xlarge', 'c5.18xlarge', 'c5.24xlarge',\n 'c5.metal'\n ] + AWSGlobalInstanceChoices,\n default='c5.xlarge'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['io1', 'io2', 'gp2', 'gp3', 'st1', 'sc1'],\n default='gp2'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=100\n ),\n 'iops': SpecValidator(\n type='integer',\n min=100,\n max=64000,\n default=250\n )\n }\n }\n}\n\nDefaultAzureSpec = {\n 'available_os': {\n 'CentOS7': {\n 'publisher': SpecValidator(type='string', default=\"OpenLogic\"),\n 'offer': SpecValidator(type='string', default=\"CentOS\"),\n 'sku': SpecValidator(type='string', default=\"7.7\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat7': {\n 'publisher': SpecValidator(type='string', default=\"RedHat\"),\n 'offer': SpecValidator(type='string', default=\"RHEL\"),\n 'sku': SpecValidator(type='string', default=\"7.8\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat8': {\n 'publisher': SpecValidator(type='string', default=\"RedHat\"),\n 'offer': SpecValidator(type='string', default=\"RHEL\"),\n 'sku': SpecValidator(type='string', default=\"8.2\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RockyLinux8': {\n 'publisher': SpecValidator(type='string', default=\"Perforce\"),\n 'offer': SpecValidator(type='string', default=\"rockylinux8\"),\n 'sku': SpecValidator(type='string', default=\"8\"),\n 'ssh_user': SpecValidator(type='string', default='rocky')\n }\n },\n 'dbt2': SpecValidator(\n type='choice',\n choices=[True, False],\n default=False\n ),\n 'dbt2_driver': {\n 'count': 
SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',\n 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',\n 'Standard_A8m_v2'\n ],\n default='Standard_A2_v2'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n }\n },\n 'dbt2_client': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',\n 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',\n 'Standard_A8m_v2'\n ],\n default='Standard_A2_v2'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n }\n },\n 'pem_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_A1_v2', 'Standard_A2_v2', 'Standard_A4_v2',\n 'Standard_A8_v2', 'Standard_A2m_v2', 'Standard_A4m_v2',\n 'Standard_A8m_v2'\n ],\n default='Standard_A2_v2'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n }\n },\n 'hammerdb_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'Standard_D4ds_v4', 'Standard_D8ds_v4'\n ],\n default='Standard_D4ds_v4'\n ),\n 'volume': {\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='Standard_LRS'\n )\n },\n 'additional_volumes': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=5,\n default=2\n ),\n 'storage_account_type': SpecValidator(\n type='choice',\n choices=['Premium_LRS', 'StandardSSD_LRS', 'Standard_LRS',\n 'UltraSSD_LRS'],\n default='StandardSSD_LRS'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=100\n )\n }\n }\n}\n\nDefaultGcloudSpec = {\n 'available_os': {\n 'CentOS7': {\n 'image': SpecValidator(type='string', default=\"centos-7\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat7': {\n 'image': SpecValidator(type='string', default=\"rhel-7\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RedHat8': {\n 'image': SpecValidator(type='string', default=\"rhel-8\"),\n 'ssh_user': SpecValidator(type='string', default='edbadm')\n },\n 'RockyLinux8': {\n 'image': SpecValidator(type='string', default=\"rocky-linux-8\"),\n 'ssh_user': SpecValidator(type='string', default='rocky')\n }\n },\n 'dbt2': SpecValidator(\n type='choice',\n choices=[True, False],\n default=False\n ),\n 'dbt2_client': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c2-standard-4', 'c2-standard-8', 'c2-standard-16'\n ],\n default='c2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n )\n }\n },\n 'dbt2_driver': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=64,\n default=0\n ),\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c2-standard-4', 
'c2-standard-8', 'c2-standard-16'\n ],\n default='c2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n )\n }\n },\n 'hammerdb_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'c2-standard-4', 'c2-standard-8', 'c2-standard-16'\n ],\n default='c2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=16000,\n default=50\n )\n },\n 'additional_volumes': {\n 'count': SpecValidator(\n type='integer',\n min=0,\n max=5,\n default=2\n ),\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-ssd'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=65536,\n default=100\n )\n }\n },\n 'pem_server': {\n 'instance_type': SpecValidator(\n type='choice',\n choices=[\n 'e2-standard-2', 'e2-standard-4', 'e2-standard-8',\n 'e2-standard-16', 'e2-standard-32', 'e2-highmem-2',\n 'e2-highmem-4', 'e2-highmem-8', 'e2-highmem-16'\n ],\n default='e2-standard-4'\n ),\n 'volume': {\n 'type': SpecValidator(\n type='choice',\n choices=['pd-standard', 'pd-ssd'],\n default='pd-standard'\n ),\n 'size': SpecValidator(\n type='integer',\n min=10,\n max=65536,\n default=100\n )\n }\n }\n}\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
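SpecValidator in the rows above only stores constraints (type, default, choices, min, max); the enforcement logic is not shown. The helper below is hypothetical, not from the original project, and just illustrates how a value could be checked against one of those validators:

# Hypothetical helper: check a user-supplied value against a SpecValidator.
def check(spec, value):
    if spec.type == 'choice' and value not in spec.choices:
        raise ValueError(f"{value!r} is not one of {spec.choices}")
    if spec.type == 'integer':
        if not isinstance(value, int):
            raise ValueError(f"{value!r} is not an integer")
        if (spec.min is not None and value < spec.min) or \
           (spec.max is not None and value > spec.max):
            raise ValueError(f"{value} is outside [{spec.min}, {spec.max}]")
    return value

# Example against the AWS pem_server volume size spec defined above:
size_spec = DefaultAWSSpec['pem_server']['volume']['size']
check(size_spec, 500)   # passes
# check(size_spec, 5)   # would raise: below the minimum of 10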
""" 챕터: day4 주제: 반복문(for문) 문제: 1에서 100까지 합을 구하여 출력하시오. 작성자: 한현수 작성일: 2018.9.20. """ result = 0 for i in range(101): result += i print(result)
normal
{ "blob_id": "d2754099adebdb4bd2b028fdf9015571ad773754", "index": 9313, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(101):\n result += i\nprint(result)\n", "step-3": "<mask token>\nresult = 0\nfor i in range(101):\n result += i\nprint(result)\n", "step-4": "\"\"\"\n챕터: day4\n주제: 반복문(for문)\n문제: 1에서 100까지 합을 구하여 출력하시오.\n작성자: 한현수\n작성일: 2018.9.20.\n\"\"\"\nresult = 0\nfor i in range(101):\n result += i\nprint(result)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
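For reference, the loop in this row sums 0 through 100 (the zero term contributes nothing), which by Gauss's formula n(n+1)/2 gives 100*101/2 = 5050. The same result in one line:

print(sum(range(101)))  # 5050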
from rest_framework import serializers from django.contrib.auth import password_validation from rest_framework.validators import UniqueValidator from .models import CustomUser, Role, Permission, ActionEntity from .utils import create_permission class ActionEntitySerializer(serializers.ModelSerializer): id = serializers.IntegerField(required=False) class Meta: model = ActionEntity fields = '__all__' class PermissionSerializer(serializers.ModelSerializer): actionEntitySet = ActionEntitySerializer(many=True) class Meta: model = Permission fields = '__all__' class RoleSerializer(serializers.ModelSerializer): id = serializers.ReadOnlyField() permissions = PermissionSerializer(many=True) name = serializers.CharField(max_length=32, validators=[UniqueValidator(queryset=Role.objects.all())]) status = serializers.IntegerField() describe = serializers.CharField(required=False, allow_null=True, max_length=128) class Meta: model = Role fields = '__all__' def create(self, validated_data): permissions = validated_data.pop('permissions', None) role = Role.objects.create(**validated_data) create_permission(role) return role def update(self, instance, validated_data): permissions = validated_data.pop('permissions', None) for permissionData in permissions: for actionData in permissionData.get('actionEntitySet'): action = ActionEntity.objects.get(pk=actionData.get('id')) action.enable = actionData.get('enable') action.save() super().update(instance, validated_data) return instance class SelfChangePasswordSerializer(serializers.Serializer): old_password = serializers.CharField(required=True) new_password = serializers.CharField(required=True) def get_current_user(self): return self.context['request'].user def validate(self, data): old_password = data.get('old_password', None) new_password = data.get('new_password', None) if old_password is not None and not self.get_current_user().check_password(old_password): raise serializers.ValidationError({'old_password': 'Your old password was entered incorrectly. 
Please enter it again.'}) if new_password is not None: password_validation.validate_password(new_password) return super().validate(data) class ChangePasswordSerializer(serializers.Serializer): new_password = serializers.CharField(max_length=128) def get_current_user(self): return self.context['request'].user def validate(self, data): new_password = data.get('new_password', None) if new_password is not None: password_validation.validate_password(new_password) return super().validate(data) class UserCreateSerializer(serializers.ModelSerializer): username = serializers.CharField(min_length= 5, max_length=150, validators=[UniqueValidator(queryset=CustomUser.objects.all())]) password = serializers.CharField(max_length=128) price_level = serializers.IntegerField(min_value=1, max_value=5) balance = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0.0) role_id = serializers.IntegerField(required=False, allow_null=True) class Meta: model = CustomUser fields = '__all__' def validate(self, data): password = data.get('password', None) username = data.get('username', None) if password is not None: password_validation.validate_password(password) return super().validate(data) def create(self, validated_data): user = super().create(validated_data) user.set_password(validated_data['password']) user.save() return user class UserSerializer(serializers.ModelSerializer): role = RoleSerializer(read_only=True) role_id = serializers.IntegerField(required=False, allow_null=True) class Meta: model = CustomUser exclude = ( 'password', ) def update(self, instance, validated_data): validated_data.pop('password', None) return super().update(instance, validated_data) class UserSimpleSerializer(serializers.ModelSerializer): class Meta: model = CustomUser fields = ( 'id', 'username', 'price_level' )
normal
{ "blob_id": "b10a50ce649650542d176a2f6fb8c35c500fbc38", "index": 3644, "step-1": "<mask token>\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n", "step-2": "<mask token>\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Role\n fields = '__all__'\n <mask token>\n <mask token>\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. 
Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n", "step-3": "<mask token>\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Role\n fields = '__all__'\n <mask token>\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n super().update(instance, validated_data)\n return instance\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. 
Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n", "step-4": "<mask token>\n\n\nclass ActionEntitySerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = ActionEntity\n fields = '__all__'\n\n\nclass PermissionSerializer(serializers.ModelSerializer):\n actionEntitySet = ActionEntitySerializer(many=True)\n\n\n class Meta:\n model = Permission\n fields = '__all__'\n\n\nclass RoleSerializer(serializers.ModelSerializer):\n id = serializers.ReadOnlyField()\n permissions = PermissionSerializer(many=True)\n name = serializers.CharField(max_length=32, validators=[UniqueValidator\n (queryset=Role.objects.all())])\n status = serializers.IntegerField()\n describe = serializers.CharField(required=False, allow_null=True,\n max_length=128)\n\n\n class Meta:\n model = Role\n fields = '__all__'\n\n def create(self, validated_data):\n permissions = validated_data.pop('permissions', None)\n role = Role.objects.create(**validated_data)\n create_permission(role)\n return role\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n super().update(instance, validated_data)\n return instance\n\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n 
old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n if old_password is not None and not self.get_current_user(\n ).check_password(old_password):\n raise serializers.ValidationError({'old_password':\n 'Your old password was entered incorrectly. Please enter it again.'\n })\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n if new_password is not None:\n password_validation.validate_password(new_password)\n return super().validate(data)\n\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length=5, max_length=150,\n validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2,\n min_value=0.0)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n if password is not None:\n password_validation.validate_password(password)\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\n\nclass UserSerializer(serializers.ModelSerializer):\n role = RoleSerializer(read_only=True)\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n\n class Meta:\n model = CustomUser\n exclude = 'password',\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = CustomUser\n fields = 'id', 'username', 'price_level'\n", "step-5": "from rest_framework import serializers\nfrom django.contrib.auth import password_validation\nfrom rest_framework.validators import UniqueValidator\n\nfrom .models import CustomUser, Role, Permission, ActionEntity\nfrom .utils import create_permission\n\nclass ActionEntitySerializer(serializers.ModelSerializer):\n\n id = serializers.IntegerField(required=False)\n \n class Meta:\n model = ActionEntity\n fields = '__all__'\n\nclass PermissionSerializer(serializers.ModelSerializer):\n\n actionEntitySet = ActionEntitySerializer(many=True)\n\n class Meta:\n model = Permission\n fields = '__all__'\n\nclass RoleSerializer(serializers.ModelSerializer):\n\n id = serializers.ReadOnlyField()\n\n permissions = PermissionSerializer(many=True)\n\n name = serializers.CharField(max_length=32, validators=[UniqueValidator(queryset=Role.objects.all())])\n\n status = serializers.IntegerField() \n\n describe = serializers.CharField(required=False, allow_null=True, max_length=128)\n\n class Meta:\n model = Role\n fields = '__all__'\n\n def create(self, validated_data):\n permissions = validated_data.pop('permissions', None)\n role = Role.objects.create(**validated_data)\n create_permission(role)\n return role\n\n def update(self, instance, validated_data):\n permissions = validated_data.pop('permissions', None)\n 
for permissionData in permissions:\n for actionData in permissionData.get('actionEntitySet'):\n action = ActionEntity.objects.get(pk=actionData.get('id'))\n action.enable = actionData.get('enable')\n action.save()\n\n super().update(instance, validated_data)\n\n return instance\n\nclass SelfChangePasswordSerializer(serializers.Serializer):\n\n old_password = serializers.CharField(required=True)\n new_password = serializers.CharField(required=True)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n old_password = data.get('old_password', None)\n new_password = data.get('new_password', None)\n\n if old_password is not None and not self.get_current_user().check_password(old_password):\n raise serializers.ValidationError({'old_password': 'Your old password was entered incorrectly. Please enter it again.'})\n \n if new_password is not None:\n password_validation.validate_password(new_password)\n\n return super().validate(data)\n\nclass ChangePasswordSerializer(serializers.Serializer):\n\n new_password = serializers.CharField(max_length=128)\n\n def get_current_user(self):\n return self.context['request'].user\n\n def validate(self, data):\n new_password = data.get('new_password', None)\n\n if new_password is not None:\n password_validation.validate_password(new_password)\n\n return super().validate(data)\n\nclass UserCreateSerializer(serializers.ModelSerializer):\n username = serializers.CharField(min_length= 5, max_length=150, validators=[UniqueValidator(queryset=CustomUser.objects.all())])\n password = serializers.CharField(max_length=128)\n price_level = serializers.IntegerField(min_value=1, max_value=5)\n balance = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0.0)\n\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n class Meta:\n model = CustomUser\n fields = '__all__'\n\n def validate(self, data):\n password = data.get('password', None)\n username = data.get('username', None)\n \n if password is not None:\n password_validation.validate_password(password)\n\n return super().validate(data)\n\n def create(self, validated_data):\n user = super().create(validated_data)\n user.set_password(validated_data['password'])\n user.save()\n return user\n\nclass UserSerializer(serializers.ModelSerializer):\n\n role = RoleSerializer(read_only=True)\n\n role_id = serializers.IntegerField(required=False, allow_null=True)\n\n class Meta:\n model = CustomUser\n exclude = (\n 'password',\n )\n\n def update(self, instance, validated_data):\n validated_data.pop('password', None)\n return super().update(instance, validated_data)\n\nclass UserSimpleSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = CustomUser\n fields = (\n 'id', 'username', 'price_level'\n )", "step-ids": [ 7, 17, 18, 23, 26 ] }
[ 7, 17, 18, 23, 26 ]
from django.urls import path
from .views.home import Home
from .views.signup import Signup
from .views.login import Login

urlpatterns = [
    path('', Home.as_view(), name='home'),
    path('signup', Signup.as_view(), name='signup'),
    path('login', Login.as_view(), name='login'),
]
normal
{ "blob_id": "979a387e29867818ffad7291511ff0be40dee118", "index": 1938, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('', Home.as_view(), name='home'), path('signup', Signup\n .as_view(), name='signup'), path('login', Login.as_view(), name='login')]\n", "step-3": "from django.urls import path\nfrom .views.home import Home\nfrom .views.signup import Signup\nfrom .views.login import Login\nurlpatterns = [path('', Home.as_view(), name='home'), path('signup', Signup\n .as_view(), name='signup'), path('login', Login.as_view(), name='login')]\n", "step-4": "from django.urls import path\nfrom .views.home import Home\nfrom .views.signup import Signup\nfrom .views.login import Login\nurlpatterns = [\n path('', Home.as_view(), name='home'),\n path('signup', Signup.as_view(), name='signup'),\n path('login', Login.as_view(), name='login'),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import time
import json
import pygame
from pygame.locals import *
import urllib.request
from pygame.color import THECOLORS
pygame.init()
Brack=[0,0,0]
White=[255,255,255]
Green=[0,255,0]
Red=[255,0,0]
Gray=[169,169,169]
button_text=["开 始","开 始","开 始","开 始","开 始"]
line=['http://localhost:5050/mixer/000','http://localhost:5050/mixer/100','http://localhost:5050/mixer/200','http://localhost:5050/mixer/300','http://localhost:5050/mixer/400']
line0=['http://localhost:5000/carrier/moveto/0','http://localhost:5000/carrier/moveto/1','http://localhost:5000/carrier/moveto/2','http://localhost:5000/carrier/moveto/3','http://localhost:5000/carrier/moveto/4']
CGQ=[[0,1,1,1,1],[1,0,1,1,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,0]]
color=[Green,Green,Green,Green,Green]
button_text0="手动状态:"
button_text1=["工位0","工位1","工位2","工位3","工位4"]
Num=['0','1','2','3','4']
B0=[452,522,592,662,732]
screen = pygame.display.set_mode((1240,768),FULLSCREEN,32)
screen.fill(Brack)
pygame.draw.rect(screen,White,[420,134,400,500],0)
text=["工 序 甲:","工 序 乙:","工 序 丙:","工 序 丁:","工 序 戊:"]
text_0=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",22)
text_1=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",18)
text_2=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",15)
text_fmt0=text_0.render("操 作 界 面",2,Brack)
screen.blit(text_fmt0,(545,140))
pygame.display.update()
def Process(num,x,y,button_text,color):
    text_fmt1=text_1.render(text[num],1,Brack)
    screen.blit(text_fmt1,(x-127,y))
    pygame.draw.rect(screen,Brack,[x,y,60,25],2)
    pygame.draw.rect(screen,color,[x+2,y+2,57,22],0)
    button=text_2.render(button_text,1,Brack)
    screen.blit(button,(x+13,y+3))
    pygame.display.update()
def Station(num,x,y,a):
    pygame.draw.rect(screen,Brack,[x,y,55,28],2)
    pygame.draw.rect(screen,Green,[x+2,y+2,52,25],0)
    button=text_2.render(button_text1[num],1,Brack)
    screen.blit(button,(x+9,y+4))
    img=pygame.image.load('cgq.jpg')
    img=pygame.transform.smoothscale(img,(52,50))
    screen.blit(img,(x,y+80))
    button=text_1.render(Num[a],1,Brack)
    screen.blit(button,(x+20,610))
    pygame.display.update()
if __name__ == '__main__':
    while True:
        time.sleep(1.5)
        pygame.draw.rect(screen,White,[506,440,85,28],0)
        pygame.draw.rect(screen,Brack,[597,440,65,28],2)
        pygame.draw.rect(screen,Green,[599,442,62,25],0)
        button1=text_1.render("切 换",1,Brack)
        screen.blit(button1,(611,444))
        button=text_1.render(button_text0,1,Brack)
        screen.blit(button,(506,444))
        B=[[0,647,190,button_text[0],color[0]],[1,647,240,button_text[1],color[1]],[2,647,290,button_text[2],color[2]],[3,647,340,button_text[3],color[3]],[4,647,390,button_text[4],color[4]]]
        if button_text==["开 始","开 始","开 始","开 始","开 始"]:
            response2=urllib.request.urlopen('http://localhost:5000/carrier/status')
            html2=response2.read()
            text2=json.loads(html2)
            a=text2['sensors']
            b=text2['pos']
            C=[[0,452,490,a[0]],[1,522,490,a[1]],[2,592,490,a[2]],[3,662,490,a[3]],[4,732,490,a[4]]]
            pygame.draw.rect(screen,White,[420,525,400,50],0)
            pygame.draw.rect(screen,White,[420,615,400,30],0)
            img=pygame.image.load('car.jpg')
            img=pygame.transform.smoothscale(img,(52,50))
            screen.blit(img,(B0[b],525))
        if button_text0=="手动状态:":
            for t in range(5):
                if button_text[t]=="结 束":
                    button_text[t]="开 始"
                    color[t]=Green
        elif button_text0=="自动状态:":
            if button_text[0]=="结 束":
                response0=urllib.request.urlopen(line[0])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[0]="开 始"
                button_text[1]="结 束"
            elif button_text[1]=="结 束":
                response0=urllib.request.urlopen(line[1])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[1]="开 始"
                button_text[2]="结 束"
            elif button_text[2]=="结 束":
                response0=urllib.request.urlopen(line[2])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[2]="开 始"
                button_text[3]="结 束"
            elif button_text[3]=="结 束":
                response0=urllib.request.urlopen(line[3])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[3]="开 始"
                button_text[4]="结 束"
            elif button_text[4]=="结 束":
                response0=urllib.request.urlopen(line[4])
                html0=response0.read()
                text0=json.loads(html0)
                print(text0)
                button_text[4]="开 始"
        for i in B:
            Process(i[0],i[1],i[2],i[3],i[4])
        for v in C:
            Station(v[0],v[1],v[2],v[3])
        for event in pygame.event.get():
            if event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    exit()
            elif event.type == QUIT:
                exit()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                pressed_array = pygame.mouse.get_pressed()
                pos = pygame.mouse.get_pos()
                for index in range(len(pressed_array)):
                    if pressed_array[index]:
                        if index==0:
                            if 597<=pos[0]<=662 and 440<=pos[1]<=468:
                                if button_text0=="自动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
                                    button_text0="手动状态:"
                                    color=[Green,Green,Green,Green,Green]
                                elif button_text0=="手动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
                                    button_text0="自动状态:"
                                    button_text[0]="结 束"
                                    color=[Gray,Gray,Gray,Gray,Gray]
                            for i in B:
                                if i[1]<=pos[0]<=i[1]+60 and i[2]<=pos[1]<=i[2]+25:
                                    if button_text==["开 始","开 始","开 始","开 始","开 始"] and button_text0=="手动状态:":
                                        color[i[0]]=Red
                                        button_text[i[0]]="结 束"
                                        response1=urllib.request.urlopen(line[i[0]])
                                        html1=response1.read()
                                        text1=json.loads(html1)
                                        print(text1)
                            for v in C:
                                if v[1]<=pos[0]<=v[1]+60 and v[2]<=pos[1]<=v[2]+28:
                                    response3=urllib.request.urlopen(line0[v[0]])
                                    html3=response3.read()
                                    text3=json.loads(html3)
                                    pygame.draw.rect(screen,White,[420,525,400,50],0)
                                    pygame.draw.rect(screen,White,[420,615,400,30],0)
                                    img=pygame.image.load('car.jpg')
                                    img=pygame.transform.smoothscale(img,(52,50))
                                    screen.blit(img,(B0[int(text3)],525))
                                    C=[[0,452,490,CGQ[v[0]][0]],[1,522,490,CGQ[v[0]][1]],[2,592,490,CGQ[v[0]][2]],[3,662,490,CGQ[v[0]][3]],[4,732,490,CGQ[v[0]][4]]]
                                    for f in C:
                                        Station(f[0],f[1],f[2],f[3])
        pygame.display.update()
normal
{ "blob_id": "609071fc3af1b526fbd4555ced2376f56ae0f3c3", "index": 2174, "step-1": "<mask token>\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\n<mask token>\n", "step-2": "<mask token>\npygame.init()\n<mask token>\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\n<mask token>\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif 
button_text[1] == '结 束':\n response0 = urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n", "step-3": "<mask token>\npygame.init()\nBrack = [0, 0, 0]\nWhite = [255, 255, 255]\nGreen = [0, 255, 0]\nRed = [255, 0, 0]\nGray = [169, 169, 169]\nbutton_text = ['开 始', '开 始', '开 始', '开 始', '开 始']\nline = ['http://localhost:5050/mixer/000',\n 'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',\n 'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']\nline0 = ['http://localhost:5000/carrier/moveto/0',\n 'http://localhost:5000/carrier/moveto/1',\n 'http://localhost:5000/carrier/moveto/2',\n 'http://localhost:5000/carrier/moveto/3',\n 'http://localhost:5000/carrier/moveto/4']\nCGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],\n [1, 1, 1, 1, 0]]\ncolor = [Green, Green, Green, Green, Green]\nbutton_text0 = '手动状态:'\nbutton_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']\nNum = ['0', '1', '2', '3', '4']\nB0 = [452, 522, 592, 662, 732]\nscreen = 
pygame.display.set_mode((1240, 768), FULLSCREEN, 32)\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\ntext = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']\ntext_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)\ntext_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)\ntext_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)\ntext_fmt0 = text_0.render('操 作 界 面', 2, Brack)\nscreen.blit(text_fmt0, (545, 140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n 
response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n", "step-4": "import time\nimport json\nimport pygame\nfrom pygame.locals import *\nimport urllib.request\nfrom pygame.color import THECOLORS\npygame.init()\nBrack = [0, 0, 0]\nWhite = [255, 255, 255]\nGreen = [0, 255, 0]\nRed = [255, 0, 0]\nGray = [169, 169, 169]\nbutton_text = ['开 始', '开 始', '开 始', '开 始', '开 始']\nline = ['http://localhost:5050/mixer/000',\n 'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',\n 'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']\nline0 = ['http://localhost:5000/carrier/moveto/0',\n 'http://localhost:5000/carrier/moveto/1',\n 'http://localhost:5000/carrier/moveto/2',\n 'http://localhost:5000/carrier/moveto/3',\n 'http://localhost:5000/carrier/moveto/4']\nCGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],\n [1, 1, 1, 1, 0]]\ncolor = [Green, Green, Green, Green, Green]\nbutton_text0 = '手动状态:'\nbutton_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']\nNum = ['0', '1', '2', '3', '4']\nB0 = [452, 522, 592, 662, 732]\nscreen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)\nscreen.fill(Brack)\npygame.draw.rect(screen, White, [420, 134, 400, 500], 0)\ntext = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']\ntext_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)\ntext_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)\ntext_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)\ntext_fmt0 = text_0.render('操 作 界 面', 2, Brack)\nscreen.blit(text_fmt0, (545, 
140))\npygame.display.update()\n\n\ndef Process(num, x, y, button_text, color):\n text_fmt1 = text_1.render(text[num], 1, Brack)\n screen.blit(text_fmt1, (x - 127, y))\n pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)\n pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)\n button = text_2.render(button_text, 1, Brack)\n screen.blit(button, (x + 13, y + 3))\n pygame.display.update()\n\n\ndef Station(num, x, y, a):\n pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)\n pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)\n button = text_2.render(button_text1[num], 1, Brack)\n screen.blit(button, (x + 9, y + 4))\n img = pygame.image.load('cgq.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (x, y + 80))\n button = text_1.render(Num[a], 1, Brack)\n screen.blit(button, (x + 20, 610))\n pygame.display.update()\n\n\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)\n pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)\n pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)\n button1 = text_1.render('切 换', 1, Brack)\n screen.blit(button1, (611, 444))\n button = text_1.render(button_text0, 1, Brack)\n screen.blit(button, (506, 444))\n B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,\n button_text[1], color[1]], [2, 647, 290, button_text[2], color[\n 2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,\n button_text[4], color[4]]]\n if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:\n response2 = urllib.request.urlopen(\n 'http://localhost:5000/carrier/status')\n html2 = response2.read()\n text2 = json.loads(html2)\n a = text2['sensors']\n b = text2['pos']\n C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],\n [3, 662, 490, a[3]], [4, 732, 490, a[4]]]\n pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img, (52, 50))\n screen.blit(img, (B0[b], 525))\n if button_text0 == '手动状态:':\n for t in range(5):\n if button_text[t] == '结 束':\n button_text[t] = '开 始'\n color[t] = Green\n elif button_text0 == '自动状态:':\n if button_text[0] == '结 束':\n response0 = urllib.request.urlopen(line[0])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[0] = '开 始'\n button_text[1] = '结 束'\n elif button_text[1] == '结 束':\n response0 = urllib.request.urlopen(line[1])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[1] = '开 始'\n button_text[2] = '结 束'\n elif button_text[2] == '结 束':\n response0 = urllib.request.urlopen(line[2])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[2] = '开 始'\n button_text[3] = '结 束'\n elif button_text[3] == '结 束':\n response0 = urllib.request.urlopen(line[3])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[3] = '开 始'\n button_text[4] = '结 束'\n elif button_text[4] == '结 束':\n response0 = urllib.request.urlopen(line[4])\n html0 = response0.read()\n text0 = json.loads(html0)\n print(text0)\n button_text[4] = '开 始'\n for i in B:\n Process(i[0], i[1], i[2], i[3], i[4])\n for v in C:\n Station(v[0], v[1], v[2], v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in 
range(len(pressed_array)):\n if pressed_array[index]:\n if index == 0:\n if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:\n if button_text0 == '自动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '手动状态:'\n color = [Green, Green, Green, Green, Green]\n elif button_text0 == '手动状态:' and button_text == [\n '开 始', '开 始', '开 始', '开 始', '开 始']:\n button_text0 = '自动状态:'\n button_text[0] = '结 束'\n color = [Gray, Gray, Gray, Gray, Gray]\n for i in B:\n if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[\n 1] <= i[2] + 25:\n if button_text == ['开 始', '开 始', '开 始',\n '开 始', '开 始'\n ] and button_text0 == '手动状态:':\n color[i[0]] = Red\n button_text[i[0]] = '结 束'\n response1 = urllib.request.urlopen(line\n [i[0]])\n html1 = response1.read()\n text1 = json.loads(html1)\n print(text1)\n for v in C:\n if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[\n 1] <= v[2] + 28:\n response3 = urllib.request.urlopen(line0\n [v[0]])\n html3 = response3.read()\n text3 = json.loads(html3)\n pygame.draw.rect(screen, White, [420, \n 525, 400, 50], 0)\n pygame.draw.rect(screen, White, [420, \n 615, 400, 30], 0)\n img = pygame.image.load('car.jpg')\n img = pygame.transform.smoothscale(img,\n (52, 50))\n screen.blit(img, (B0[int(text3)], 525))\n C = [[0, 452, 490, CGQ[v[0]][0]], [1, \n 522, 490, CGQ[v[0]][1]], [2, 592, \n 490, CGQ[v[0]][2]], [3, 662, 490,\n CGQ[v[0]][3]], [4, 732, 490, CGQ[v[\n 0]][4]]]\n for f in C:\n Station(f[0], f[1], f[2], f[3])\n pygame.display.update()\n", "step-5": "import time\nimport json\nimport pygame\nfrom pygame.locals import *\nimport urllib.request\nfrom pygame.color import THECOLORS\npygame.init()\nBrack=[0,0,0]\nWhite=[255,255,255]\nGreen=[0,255,0]\nRed=[255,0,0]\nGray=[169,169,169]\nbutton_text=[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]\nline=['http://localhost:5050/mixer/000','http://localhost:5050/mixer/100','http://localhost:5050/mixer/200','http://localhost:5050/mixer/300','http://localhost:5050/mixer/400']\nline0=['http://localhost:5000/carrier/moveto/0','http://localhost:5000/carrier/moveto/1','http://localhost:5000/carrier/moveto/2','http://localhost:5000/carrier/moveto/3','http://localhost:5000/carrier/moveto/4']\nCGQ=[[0,1,1,1,1],[1,0,1,1,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,0]]\ncolor=[Green,Green,Green,Green,Green]\nbutton_text0=\"手动状态:\"\nbutton_text1=[\"工位0\",\"工位1\",\"工位2\",\"工位3\",\"工位4\"]\nNum=['0','1','2','3','4']\nB0=[452,522,592,662,732]\nscreen = pygame.display.set_mode((1240,768),FULLSCREEN,32)\nscreen.fill(Brack)\npygame.draw.rect(screen,White,[420,134,400,500],0)\ntext=[\"工 序 甲:\",\"工 序 乙:\",\"工 序 丙:\",\"工 序 丁:\",\"工 序 戊:\"]\ntext_0=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",22)\ntext_1=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",18)\ntext_2=pygame.font.Font(\"/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc\",15)\ntext_fmt0=text_0.render(\"操 作 界 面\",2,Brack)\nscreen.blit(text_fmt0,(545,140))\npygame.display.update()\ndef Process(num,x,y,button_text,color):\n text_fmt1=text_1.render(text[num],1,Brack)\n screen.blit(text_fmt1,(x-127,y))\n pygame.draw.rect(screen,Brack,[x,y,60,25],2)\n pygame.draw.rect(screen,color,[x+2,y+2,57,22],0)\n button=text_2.render(button_text,1,Brack)\n screen.blit(button,(x+13,y+3))\n pygame.display.update()\ndef Station(num,x,y,a):\n pygame.draw.rect(screen,Brack,[x,y,55,28],2)\n pygame.draw.rect(screen,Green,[x+2,y+2,52,25],0)\n button=text_2.render(button_text1[num],1,Brack)\n screen.blit(button,(x+9,y+4))\n img=pygame.image.load('cgq.jpg')\n 
img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(x,y+80))\n button=text_1.render(Num[a],1,Brack)\n screen.blit(button,(x+20,610))\n pygame.display.update()\nif __name__ == '__main__':\n while True:\n time.sleep(1.5)\n pygame.draw.rect(screen,White,[506,440,85,28],0)\n pygame.draw.rect(screen,Brack,[597,440,65,28],2)\n pygame.draw.rect(screen,Green,[599,442,62,25],0)\n button1=text_1.render(\"切 换\",1,Brack)\n screen.blit(button1,(611,444))\n button=text_1.render(button_text0,1,Brack)\n screen.blit(button,(506,444))\n B=[[0,647,190,button_text[0],color[0]],[1,647,240,button_text[1],color[1]],[2,647,290,button_text[2],color[2]],[3,647,340,button_text[3],color[3]],[4,647,390,button_text[4],color[4]]]\n if button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n response2=urllib.request.urlopen('http://localhost:5000/carrier/status')\n html2=response2.read()\n text2=json.loads(html2)\n a=text2['sensors']\n b=text2['pos']\n C=[[0,452,490,a[0]],[1,522,490,a[1]],[2,592,490,a[2]],[3,662,490,a[3]],[4,732,490,a[4]]]\n pygame.draw.rect(screen,White,[420,525,400,50],0)\n pygame.draw.rect(screen,White,[420,615,400,30],0)\n img=pygame.image.load('car.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(B0[b],525))\n if button_text0==\"手动状态:\":\n for t in range(5):\n if button_text[t]==\"结 束\":\n button_text[t]=\"开 始\"\n color[t]=Green\n elif button_text0==\"自动状态:\":\n if button_text[0]==\"结 束\":\n response0=urllib.request.urlopen(line[0])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[0]=\"开 始\"\n button_text[1]=\"结 束\"\n elif button_text[1]==\"结 束\":\n response0=urllib.request.urlopen(line[1])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[1]=\"开 始\"\n button_text[2]=\"结 束\"\n elif button_text[2]==\"结 束\":\n response0=urllib.request.urlopen(line[2])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[2]=\"开 始\"\n button_text[3]=\"结 束\"\n elif button_text[3]==\"结 束\":\n response0=urllib.request.urlopen(line[3])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[3]=\"开 始\"\n button_text[4]=\"结 束\"\n elif button_text[4]==\"结 束\":\n response0=urllib.request.urlopen(line[4])\n html0=response0.read()\n text0=json.loads(html0)\n print(text0)\n button_text[4]=\"开 始\"\n for i in B:\n Process(i[0],i[1],i[2],i[3],i[4])\n for v in C:\n Station(v[0],v[1],v[2],v[3])\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n elif event.type == QUIT:\n exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n pressed_array = pygame.mouse.get_pressed()\n pos = pygame.mouse.get_pos()\n for index in range(len(pressed_array)):\n if pressed_array[index]:\n if index==0:\n if 597<=pos[0]<=662 and 440<=pos[1]<=468:\n if button_text0==\"自动状态:\" and button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n button_text0=\"手动状态:\"\n color=[Green,Green,Green,Green,Green]\n elif button_text0==\"手动状态:\" and button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"]:\n button_text0=\"自动状态:\"\n button_text[0]=\"结 束\"\n color=[Gray,Gray,Gray,Gray,Gray]\n for i in B:\n if i[1]<=pos[0]<=i[1]+60 and i[2]<=pos[1]<=i[2]+25:\n if button_text==[\"开 始\",\"开 始\",\"开 始\",\"开 始\",\"开 始\"] and button_text0==\"手动状态:\":\n color[i[0]]=Red\n button_text[i[0]]=\"结 束\"\n response1=urllib.request.urlopen(line[i[0]])\n html1=response1.read()\n text1=json.loads(html1)\n print(text1)\n for v in C:\n if v[1]<=pos[0]<=v[1]+60 and v[2]<=pos[1]<=v[2]+28:\n 
response3=urllib.request.urlopen(line0[v[0]])\n html3=response3.read()\n text3=json.loads(html3)\n pygame.draw.rect(screen,White,[420,525,400,50],0)\n pygame.draw.rect(screen,White,[420,615,400,30],0)\n img=pygame.image.load('car.jpg')\n img=pygame.transform.smoothscale(img,(52,50))\n screen.blit(img,(B0[int(text3)],525))\n C=[[0,452,490,CGQ[v[0]][0]],[1,522,490,CGQ[v[0]][1]],[2,592,490,CGQ[v[0]][2]],[3,662,490,CGQ[v[0]][3]],[4,732,490,CGQ[v[0]][4]]]\n for f in C:\n Station(f[0],f[1],f[2],f[3])\n pygame.display.update()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import numpy as np
import matplotlib.pyplot as plt


def cos_Taylor2(x, n):
    s = 0
    a = 1
    for i in range(0, n+1):
        s = s+a
        a = -a*x**2 / ((2*i+1)*(2*i+2))
    return s, abs(a)
vcos = np.vectorize(cos_Taylor2)


def cos_two_terms(x):
    s = 0
    a = 1
    s = s+a
    a = -a*x**2 / ((2*0+1)*(2*0+2))
    s = s + a
    a = -a*x**2 / ((2*1+1)*(2*1+2))
    s = s + a
    a = -a*x**2 / ((2*2+1)*(2*2+2))
    return s, abs(a)


def test_cos_Taylor():
    x = 0.63
    tol = 1e-14
    s_expected, a_expected = cos_two_terms(x)
    s_computed, a_computed = cos_Taylor2(x,2)
    success1 = abs(s_computed - s_expected) < tol
    success2 = abs(a_computed - a_expected) < tol
    success = success1 and success2
    message = 'Output is different from expected!'
    assert success, message
test_cos_Taylor()


x = np.linspace(-5,5,100)
n = [0,2,4,6]
for i in n:
    y = vcos(x, i)
    plt.plot(x, y[0], label='n = %g' % i)
y = np.cos(x)
plt.plot(x, y, 'b-', label = 'expected')
plt.ylim(-1.1,1.1)
plt.legend()
plt.savefig('cos_Taylor_series_diffeq.png')
plt.show()


'''
Terminal> cos_Taylor_series_diffeq.py"

Process finished with exit code 0
'''
normal
{ "blob_id": "fb0dcb641dfb379751264dc0b18007f5d058d379", "index": 3520, "step-1": "<mask token>\n\n\ndef cos_two_terms(x):\n s = 0\n a = 1\n s = s + a\n a = -a * x ** 2 / ((2 * 0 + 1) * (2 * 0 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 1 + 1) * (2 * 1 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 2 + 1) * (2 * 2 + 2))\n return s, abs(a)\n\n\ndef test_cos_Taylor():\n x = 0.63\n tol = 1e-14\n s_expected, a_expected = cos_two_terms(x)\n s_computed, a_computed = cos_Taylor2(x, 2)\n success1 = abs(s_computed - s_expected) < tol\n success2 = abs(a_computed - a_expected) < tol\n success = success1 and success2\n message = 'Output is different from expected!'\n assert success, message\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef cos_Taylor2(x, n):\n s = 0\n a = 1\n for i in range(0, n + 1):\n s = s + a\n a = -a * x ** 2 / ((2 * i + 1) * (2 * i + 2))\n return s, abs(a)\n\n\n<mask token>\n\n\ndef cos_two_terms(x):\n s = 0\n a = 1\n s = s + a\n a = -a * x ** 2 / ((2 * 0 + 1) * (2 * 0 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 1 + 1) * (2 * 1 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 2 + 1) * (2 * 2 + 2))\n return s, abs(a)\n\n\ndef test_cos_Taylor():\n x = 0.63\n tol = 1e-14\n s_expected, a_expected = cos_two_terms(x)\n s_computed, a_computed = cos_Taylor2(x, 2)\n success1 = abs(s_computed - s_expected) < tol\n success2 = abs(a_computed - a_expected) < tol\n success = success1 and success2\n message = 'Output is different from expected!'\n assert success, message\n\n\ntest_cos_Taylor()\n<mask token>\nfor i in n:\n y = vcos(x, i)\n plt.plot(x, y[0], label='n = %g' % i)\n<mask token>\nplt.plot(x, y, 'b-', label='expected')\nplt.ylim(-1.1, 1.1)\nplt.legend()\nplt.savefig('cos_Taylor_series_diffeq.png')\nplt.show()\n<mask token>\n", "step-3": "<mask token>\n\n\ndef cos_Taylor2(x, n):\n s = 0\n a = 1\n for i in range(0, n + 1):\n s = s + a\n a = -a * x ** 2 / ((2 * i + 1) * (2 * i + 2))\n return s, abs(a)\n\n\nvcos = np.vectorize(cos_Taylor2)\n\n\ndef cos_two_terms(x):\n s = 0\n a = 1\n s = s + a\n a = -a * x ** 2 / ((2 * 0 + 1) * (2 * 0 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 1 + 1) * (2 * 1 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 2 + 1) * (2 * 2 + 2))\n return s, abs(a)\n\n\ndef test_cos_Taylor():\n x = 0.63\n tol = 1e-14\n s_expected, a_expected = cos_two_terms(x)\n s_computed, a_computed = cos_Taylor2(x, 2)\n success1 = abs(s_computed - s_expected) < tol\n success2 = abs(a_computed - a_expected) < tol\n success = success1 and success2\n message = 'Output is different from expected!'\n assert success, message\n\n\ntest_cos_Taylor()\nx = np.linspace(-5, 5, 100)\nn = [0, 2, 4, 6]\nfor i in n:\n y = vcos(x, i)\n plt.plot(x, y[0], label='n = %g' % i)\ny = np.cos(x)\nplt.plot(x, y, 'b-', label='expected')\nplt.ylim(-1.1, 1.1)\nplt.legend()\nplt.savefig('cos_Taylor_series_diffeq.png')\nplt.show()\n<mask token>\n", "step-4": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef cos_Taylor2(x, n):\n s = 0\n a = 1\n for i in range(0, n + 1):\n s = s + a\n a = -a * x ** 2 / ((2 * i + 1) * (2 * i + 2))\n return s, abs(a)\n\n\nvcos = np.vectorize(cos_Taylor2)\n\n\ndef cos_two_terms(x):\n s = 0\n a = 1\n s = s + a\n a = -a * x ** 2 / ((2 * 0 + 1) * (2 * 0 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 1 + 1) * (2 * 1 + 2))\n s = s + a\n a = -a * x ** 2 / ((2 * 2 + 1) * (2 * 2 + 2))\n return s, abs(a)\n\n\ndef test_cos_Taylor():\n x = 0.63\n tol = 1e-14\n s_expected, a_expected = cos_two_terms(x)\n s_computed, a_computed = cos_Taylor2(x, 2)\n success1 = abs(s_computed - s_expected) < tol\n 
success2 = abs(a_computed - a_expected) < tol\n success = success1 and success2\n message = 'Output is different from expected!'\n assert success, message\n\n\ntest_cos_Taylor()\nx = np.linspace(-5, 5, 100)\nn = [0, 2, 4, 6]\nfor i in n:\n y = vcos(x, i)\n plt.plot(x, y[0], label='n = %g' % i)\ny = np.cos(x)\nplt.plot(x, y, 'b-', label='expected')\nplt.ylim(-1.1, 1.1)\nplt.legend()\nplt.savefig('cos_Taylor_series_diffeq.png')\nplt.show()\n<mask token>\n", "step-5": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef cos_Taylor2(x, n):\n s = 0\n a = 1\n for i in range(0, n+1):\n s = s+a\n a = -a*x**2 / ((2*i+1)*(2*i+2))\n return s, abs(a)\nvcos = np.vectorize(cos_Taylor2)\n\n\ndef cos_two_terms(x):\n s = 0\n a = 1\n s = s+a\n a = -a*x**2 / ((2*0+1)*(2*0+2))\n s = s + a\n a = -a*x**2 / ((2*1+1)*(2*1+2))\n s = s + a\n a = -a*x**2 / ((2*2+1)*(2*2+2))\n return s, abs(a)\n\n\ndef test_cos_Taylor():\n x = 0.63\n tol = 1e-14\n s_expected, a_expected = cos_two_terms(x)\n s_computed, a_computed = cos_Taylor2(x,2)\n success1 = abs(s_computed - s_expected) < tol\n success2 = abs(a_computed - a_expected) < tol\n success = success1 and success2\n message = 'Output is different from expected!'\n assert success, message\ntest_cos_Taylor()\n\n\nx = np.linspace(-5,5,100)\nn = [0,2,4,6]\nfor i in n:\n y = vcos(x, i)\n plt.plot(x, y[0], label='n = %g' % i)\ny = np.cos(x)\nplt.plot(x, y, 'b-', label = 'expected')\nplt.ylim(-1.1,1.1)\nplt.legend()\nplt.savefig('cos_Taylor_series_diffeq.png')\nplt.show()\n\n\n'''\nTerminal> cos_Taylor_series_diffeq.py\"\n\nProcess finished with exit code 0\n'''", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
class Node:
    def __init__(self, data):
        self.data = data
        self.prev = None
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None


    def insertAtHead(self, newNode, curNode):
        newNode.next = curNode
        if curNode is not None: curNode.prev = newNode
        self.head = newNode


    def insertAtTail(self, newNode, curNode):
        if self.head is None:
            self.head = newNode
            return

        while curNode.next is not None:
            curNode = curNode.next

        curNode.next = newNode
        newNode.prev = curNode


    def printForward(self, curNode):
        while curNode is not None:
            print(curNode.data)
            curNode = curNode.next


    def printReverse(self, curNode):
        while curNode.next is not None:
            curNode = curNode.next

        while curNode is not None:
            print(curNode.data)
            curNode = curNode.prev


################################################


linkedList = LinkedList()

for i in range(3):
    newNode = Node(input("Enter data: "))
    #linkedList.insertAtTail(newNode, linkedList.head)
    linkedList.insertAtHead(newNode, linkedList.head)

linkedList.printForward(linkedList.head)
print("######################")
linkedList.printReverse(linkedList.head)
normal
{ "blob_id": "a3cbdecbbfc49e8ac045f4aabbea6b9f54ed3d5f", "index": 4904, "step-1": "<mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n <mask token>\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\n", "step-2": "class Node:\n <mask token>\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n while curNode.next is not None:\n curNode = curNode.next\n curNode.next = newNode\n newNode.prev = curNode\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\n", "step-3": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n while curNode.next is not None:\n curNode = curNode.next\n curNode.next = newNode\n newNode.prev = curNode\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\n", "step-4": "class Node:\n\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n\n def __init__(self):\n self.head = None\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode is not None:\n curNode.prev = newNode\n self.head = newNode\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n while curNode.next is not None:\n curNode = curNode.next\n curNode.next = newNode\n newNode.prev = curNode\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n<mask token>\nfor i in range(3):\n newNode = Node(input('Enter data: '))\n linkedList.insertAtHead(newNode, linkedList.head)\nlinkedList.printForward(linkedList.head)\nprint('######################')\nlinkedList.printReverse(linkedList.head)\n", "step-5": "class Node:\n def __init__(self, data):\n self.data = data\n self.prev = None\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n\n def insertAtHead(self, newNode, curNode):\n newNode.next = curNode\n if curNode 
is not None: curNode.prev = newNode\n self.head = newNode\n\n\n def insertAtTail(self, newNode, curNode):\n if self.head is None:\n self.head = newNode\n return\n \n while curNode.next is not None:\n curNode = curNode.next\n \n curNode.next = newNode\n newNode.prev = curNode\n\n\n def printForward(self, curNode):\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.next\n\n\n def printReverse(self, curNode):\n while curNode.next is not None:\n curNode = curNode.next\n\n while curNode is not None:\n print(curNode.data)\n curNode = curNode.prev\n\n\n################################################\n\n\nlinkedList = LinkedList()\n\nfor i in range(3):\n newNode = Node(input(\"Enter data: \"))\n #linkedList.insertAtTail(newNode, linkedList.head)\n linkedList.insertAtHead(newNode, linkedList.head)\n\nlinkedList.printForward(linkedList.head)\nprint(\"######################\")\nlinkedList.printReverse(linkedList.head)", "step-ids": [ 5, 7, 8, 9, 11 ] }
[ 5, 7, 8, 9, 11 ]
/Users/andreilyskov/anaconda/lib/python3.5/sre_compile.py
normal
{ "blob_id": "faf4f4d26236ac555594ef6913a0d43c3516f1f2", "index": 2063, "step-1": "/Users/andreilyskov/anaconda/lib/python3.5/sre_compile.py", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label = True)

train_loss_list = []

#hiper param
iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

network = TwoLayerNet(input_size = 784, hidden_size=50, output_size=10)
for i in range(iters_num):
    print(i)
    #get batch
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    #calc gradient
    grad = network.gradient(x_batch, t_batch)

    #update param
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]


    #recode
    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)
    print("{} : {}".format(i, train_loss_list[i]))
print(train_loss_list)
normal
{ "blob_id": "dbe3aa107de8e62822803d1740773a4b22f41edf", "index": 971, "step-1": "<mask token>\n", "step-2": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\nfor i in range(iters_num):\n print(i)\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n grad = network.gradient(x_batch, t_batch)\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print('{} : {}'.format(i, train_loss_list[i]))\nprint(train_loss_list)\n", "step-3": "<mask token>\nsys.path.append(os.pardir)\n<mask token>\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,\n one_hot_label=True)\ntrain_loss_list = []\niters_num = 1000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\nfor i in range(iters_num):\n print(i)\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n grad = network.gradient(x_batch, t_batch)\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print('{} : {}'.format(i, train_loss_list[i]))\nprint(train_loss_list)\n", "step-4": "import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom two_layer_net import TwoLayerNet\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,\n one_hot_label=True)\ntrain_loss_list = []\niters_num = 1000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\nfor i in range(iters_num):\n print(i)\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n grad = network.gradient(x_batch, t_batch)\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print('{} : {}'.format(i, train_loss_list[i]))\nprint(train_loss_list)\n", "step-5": "import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom two_layer_net import TwoLayerNet\n\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label = True)\n\ntrain_loss_list = []\n\n#hiper param\niters_num = 1000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\n\nnetwork = TwoLayerNet(input_size = 784, hidden_size=50, output_size=10)\nfor i in range(iters_num):\n print(i)\n #get batch\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n #calc gradient\n grad = network.gradient(x_batch, t_batch)\n\n #update param\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n\n\n #recode\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print(\"{} : {}\".format(i, train_loss_list[i]))\nprint(train_loss_list)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import sys
sys.stdin = open('magnet.txt', 'r')
from collections import deque


def check(t, d, c):
    if t == 1:
        if m1[2] != m2[-2] and not c:
            check(t + 1, d * (-1), 1)
        if d == 1:
            m1.appendleft(m1.pop())
        else:
            m1.append(m1.popleft())
    elif t == 4:
        if m4[-2] != m3[2] and not c:
            check(t - 1, d * (-1), 4)
        if d == 1:
            m4.appendleft(m4.pop())
        else:
            m4.append(m4.popleft())
    elif t == 2:
        if m2[2] != m3[-2] and (not c or c == 1):
            check(t + 1, d * (-1), 2)
        if m2[-2] != m1[2] and (not c or c == 3):
            check(t - 1, d * (-1), 2)
        if d == 1:
            m2.appendleft(m2.pop())
        else:
            m2.append(m2.popleft())
    else:
        if m3[2] != m4[-2] and (not c or c == 2):
            check(t + 1, d * (-1), 3)
        if m3[-2] != m2[2] and (not c or c == 4):
            check(t - 1, d * (-1), 3)
        if d == 1:
            m3.appendleft(m3.pop())
        else:
            m3.append(m3.popleft())


for test_case in range(1, int(input()) + 1):
    m1, m2, m3, m4 = deque(), deque(), deque(), deque()
    K = int(input())
    for _ in range(4):
        if m1:
            if m2:
                if m3:
                    m4 += list(map(int, input().split()))
                else:
                    m3 += list(map(int, input().split()))
            else:
                m2 += list(map(int, input().split()))
        else:
            m1 += list(map(int, input().split()))
    for _ in range(K):
        touch, direction = map(int, input().split())
        check(touch, direction, 0)
    result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]
    print('#{} {}'.format(test_case, result))
normal
{ "blob_id": "7e3a5e1f19683b1716f3c988dcc1e65fee1cae13", "index": 8956, "step-1": "<mask token>\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n", "step-3": "<mask token>\nsys.stdin = open('magnet.txt', 'r')\n<mask token>\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] 
+ 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n", "step-4": "import sys\nsys.stdin = open('magnet.txt', 'r')\nfrom collections import deque\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * -1, 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * -1, 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * -1, 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * -1, 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * -1, 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * -1, 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n", "step-5": "import sys\nsys.stdin = open('magnet.txt', 'r')\nfrom collections import deque\n\n\ndef check(t, d, c):\n if t == 1:\n if m1[2] != m2[-2] and not c:\n check(t + 1, d * (-1), 1)\n if d == 1:\n m1.appendleft(m1.pop())\n else:\n m1.append(m1.popleft())\n elif t == 4:\n if m4[-2] != m3[2] and not c:\n check(t - 1, d * (-1), 4)\n if d == 1:\n m4.appendleft(m4.pop())\n else:\n m4.append(m4.popleft())\n elif t == 2:\n if m2[2] != m3[-2] and (not c or c == 1):\n check(t + 1, d * (-1), 2)\n if m2[-2] != m1[2] and (not c or c == 3):\n check(t - 1, d * (-1), 2)\n if d == 1:\n m2.appendleft(m2.pop())\n else:\n m2.append(m2.popleft())\n else:\n if m3[2] != m4[-2] and (not c or c == 2):\n check(t + 1, d * (-1), 3)\n if m3[-2] != m2[2] and (not c or c == 4):\n check(t - 1, d * (-1), 3)\n if d == 1:\n m3.appendleft(m3.pop())\n else:\n m3.append(m3.popleft())\n\n\nfor test_case in range(1, int(input()) + 1):\n m1, m2, m3, m4 = deque(), deque(), deque(), deque()\n K = int(input())\n for _ in range(4):\n if m1:\n if m2:\n if m3:\n m4 += list(map(int, input().split()))\n else:\n m3 += list(map(int, input().split()))\n else:\n m2 += list(map(int, input().split()))\n else:\n m1 += list(map(int, input().split()))\n for _ in range(K):\n touch, direction = map(int, input().split())\n check(touch, direction, 0)\n result = m1[0] + 2 * m2[0] + 4 * m3[0] + 8 * m4[0]\n print('#{} {}'.format(test_case, result))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import boto3
import pprint
import yaml

#initialize empty dictionary to store values
new_dict = {}
count = 0
new_dict2 = {}

# dev = boto3.session.Session(profile_name='shipt')
mybatch = boto3.client('batch')

#load config properties
with open('config.yml') as f:
    content = yaml.load(f)

# pprint.pprint(content) #to print config properties in file

#get current job definition
response = mybatch.describe_job_definitions(
    jobDefinitions = [
        'axiom-staging-abcfinewine:1'
        # 'axiom-staging-costco:1'
    ],
    status='ACTIVE'
)

# print(type(response))

for k, v in response.items():
    if k == 'jobDefinitions':
        # pprint.pprint(v) #to print container properties
        # pprint.pprint(v[0]['containerProperties'])
        new_dict = v[0]['containerProperties']


#check if config properties match with current job definition properties
    # for key in new_dict.keys():
    #     if key in content.keys():
    #         count = count + 1
    #         if content[key] == new_dict[key]:
    #             new_dict2[key] == content[key]

print(content.items())
# new_dict2 = dict(content.items() & new_dict.items())

print(new_dict2)
    # if v == new_dict[k]:
    #     # print('woooh00!')
    #     print(content[k])
    #     print(v)
    #     print(new_dict[k])

# for k,v in new_dict.items():
#     print(v)
# if content != new_dict:
#     print('\n\n\n\twooohooo!')


# print(response)
# pp = pprint.PrettyPrinter(indent = 4)
# pp.pprint(response)
normal
{ "blob_id": "3ba9ff00b0d6a2006c714a9818c8b561d884e252", "index": 2302, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('config.yml') as f:\n content = yaml.load(f)\n<mask token>\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n", "step-3": "<mask token>\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\nmybatch = boto3.client('batch')\nwith open('config.yml') as f:\n content = yaml.load(f)\nresponse = mybatch.describe_job_definitions(jobDefinitions=[\n 'axiom-staging-abcfinewine:1'], status='ACTIVE')\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n", "step-4": "import boto3\nimport pprint\nimport yaml\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\nmybatch = boto3.client('batch')\nwith open('config.yml') as f:\n content = yaml.load(f)\nresponse = mybatch.describe_job_definitions(jobDefinitions=[\n 'axiom-staging-abcfinewine:1'], status='ACTIVE')\nfor k, v in response.items():\n if k == 'jobDefinitions':\n new_dict = v[0]['containerProperties']\nprint(content.items())\nprint(new_dict2)\n", "step-5": "import boto3\nimport pprint\nimport yaml\n\n#initialize empty dictionary to store values\nnew_dict = {}\ncount = 0\nnew_dict2 = {}\n\n# dev = boto3.session.Session(profile_name='shipt')\nmybatch = boto3.client('batch')\n\n#load config properties\nwith open('config.yml') as f:\n content = yaml.load(f)\n\n# pprint.pprint(content) #to print config properties in file\n\n#get current job definition\nresponse = mybatch.describe_job_definitions(\n jobDefinitions = [\n 'axiom-staging-abcfinewine:1'\n # 'axiom-staging-costco:1'\n ],\n status='ACTIVE'\n)\n\n# print(type(response))\n\nfor k, v in response.items():\n if k == 'jobDefinitions':\n # pprint.pprint(v) #to print container properties\n # pprint.pprint(v[0]['containerProperties'])\n new_dict = v[0]['containerProperties']\n\n\n#check if config properties match with current job definition properties\n # for key in new_dict.keys():\n # if key in content.keys():\n # count = count + 1\n # if content[key] == new_dict[key]:\n # new_dict2[key] == content[key]\n\nprint(content.items())\n# new_dict2 = dict(content.items() & new_dict.items())\n\nprint(new_dict2)\n # if v == new_dict[k]:\n # # print('woooh00!')\n # print(content[k])\n # print(v)\n # print(new_dict[k])\n\n# for k,v in new_dict.items():\n# print(v)\n# if content != new_dict:\n# print('\\n\\n\\n\\twooohooo!')\n\n\n# print(response)\n# pp = pprint.PrettyPrinter(indent = 4)\n# pp.pprint(response)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('levantamiento', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='FichaTecnica',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('numero', models.IntegerField(default=0)),
                ('largo', models.FloatField(default=0)),
                ('ancho', models.FloatField(default=0)),
                ('alto', models.FloatField(default=0)),
                ('parcial', models.IntegerField(default=0)),
                ('unidad', models.IntegerField(default=0)),
                ('punitario', models.IntegerField(default=0)),
                ('form', models.ForeignKey(related_name='ficha_tecnica', to='levantamiento.Levantamiento')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado1',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado2',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
                ('metrado1', models.ForeignKey(related_name='metrado_2', to='metrados.Metrado1')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado3',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
                ('metrado2', models.ForeignKey(related_name='metrado_3', to='metrados.Metrado2')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Metrado4',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('codigo', models.CharField(max_length=25)),
                ('descripcion', models.TextField()),
                ('metrado3', models.ForeignKey(related_name='metrado_4', to='metrados.Metrado3')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado1',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado1'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado2',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado2'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado3',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado3'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='fichatecnica',
            name='metrado4',
            field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado4'),
            preserve_default=True,
        ),
    ]
normal
{ "blob_id": "1049a7d2cdc54c489af6246ec014deb63a98f96d", "index": 3951, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('levantamiento', '0001_initial')]\n operations = [migrations.CreateModel(name='FichaTecnica', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('numero', models.IntegerField(default=0)\n ), ('largo', models.FloatField(default=0)), ('ancho', models.\n FloatField(default=0)), ('alto', models.FloatField(default=0)), (\n 'parcial', models.IntegerField(default=0)), ('unidad', models.\n IntegerField(default=0)), ('punitario', models.IntegerField(default\n =0)), ('form', models.ForeignKey(related_name='ficha_tecnica', to=\n 'levantamiento.Levantamiento'))], options={}, bases=(models.Model,)\n ), migrations.CreateModel(name='Metrado1', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('codigo', models.CharField(max_length=25)), (\n 'descripcion', models.TextField())], options={}, bases=(models.\n Model,)), migrations.CreateModel(name='Metrado2', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('codigo', models.CharField(max_length=25\n )), ('descripcion', models.TextField()), ('metrado1', models.\n ForeignKey(related_name='metrado_2', to='metrados.Metrado1'))],\n options={}, bases=(models.Model,)), migrations.CreateModel(name=\n 'Metrado3', fields=[('id', models.AutoField(verbose_name='ID',\n serialize=False, auto_created=True, primary_key=True)), ('codigo',\n models.CharField(max_length=25)), ('descripcion', models.TextField(\n )), ('metrado2', models.ForeignKey(related_name='metrado_3', to=\n 'metrados.Metrado2'))], options={}, bases=(models.Model,)),\n migrations.CreateModel(name='Metrado4', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('codigo', models.CharField(max_length=25)), (\n 'descripcion', models.TextField()), ('metrado3', models.ForeignKey(\n related_name='metrado_4', to='metrados.Metrado3'))], options={},\n bases=(models.Model,)), migrations.AddField(model_name=\n 'fichatecnica', name='metrado1', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado1'),\n preserve_default=True), migrations.AddField(model_name=\n 'fichatecnica', name='metrado2', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado2'),\n preserve_default=True), migrations.AddField(model_name=\n 'fichatecnica', name='metrado3', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado3'),\n preserve_default=True), migrations.AddField(model_name=\n 'fichatecnica', name='metrado4', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado4'),\n preserve_default=True)]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('levantamiento', '0001_initial')]\n operations = [migrations.CreateModel(name='FichaTecnica', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('numero', models.IntegerField(default=0)\n ), ('largo', models.FloatField(default=0)), ('ancho', models.\n FloatField(default=0)), ('alto', models.FloatField(default=0)), (\n 
'parcial', models.IntegerField(default=0)), ('unidad', models.\n IntegerField(default=0)), ('punitario', models.IntegerField(default\n =0)), ('form', models.ForeignKey(related_name='ficha_tecnica', to=\n 'levantamiento.Levantamiento'))], options={}, bases=(models.Model,)\n ), migrations.CreateModel(name='Metrado1', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('codigo', models.CharField(max_length=25)), (\n 'descripcion', models.TextField())], options={}, bases=(models.\n Model,)), migrations.CreateModel(name='Metrado2', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('codigo', models.CharField(max_length=25\n )), ('descripcion', models.TextField()), ('metrado1', models.\n ForeignKey(related_name='metrado_2', to='metrados.Metrado1'))],\n options={}, bases=(models.Model,)), migrations.CreateModel(name=\n 'Metrado3', fields=[('id', models.AutoField(verbose_name='ID',\n serialize=False, auto_created=True, primary_key=True)), ('codigo',\n models.CharField(max_length=25)), ('descripcion', models.TextField(\n )), ('metrado2', models.ForeignKey(related_name='metrado_3', to=\n 'metrados.Metrado2'))], options={}, bases=(models.Model,)),\n migrations.CreateModel(name='Metrado4', fields=[('id', models.\n AutoField(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)), ('codigo', models.CharField(max_length=25)), (\n 'descripcion', models.TextField()), ('metrado3', models.ForeignKey(\n related_name='metrado_4', to='metrados.Metrado3'))], options={},\n bases=(models.Model,)), migrations.AddField(model_name=\n 'fichatecnica', name='metrado1', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado1'),\n preserve_default=True), migrations.AddField(model_name=\n 'fichatecnica', name='metrado2', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado2'),\n preserve_default=True), migrations.AddField(model_name=\n 'fichatecnica', name='metrado3', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado3'),\n preserve_default=True), migrations.AddField(model_name=\n 'fichatecnica', name='metrado4', field=models.ForeignKey(\n related_name='ficha_tecnica', to='metrados.Metrado4'),\n preserve_default=True)]\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('levantamiento', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FichaTecnica',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('numero', models.IntegerField(default=0)),\n ('largo', models.FloatField(default=0)),\n ('ancho', models.FloatField(default=0)),\n ('alto', models.FloatField(default=0)),\n ('parcial', models.IntegerField(default=0)),\n ('unidad', models.IntegerField(default=0)),\n ('punitario', models.IntegerField(default=0)),\n ('form', models.ForeignKey(related_name='ficha_tecnica', to='levantamiento.Levantamiento')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Metrado1',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('codigo', models.CharField(max_length=25)),\n ('descripcion', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n 
name='Metrado2',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('codigo', models.CharField(max_length=25)),\n ('descripcion', models.TextField()),\n ('metrado1', models.ForeignKey(related_name='metrado_2', to='metrados.Metrado1')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Metrado3',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('codigo', models.CharField(max_length=25)),\n ('descripcion', models.TextField()),\n ('metrado2', models.ForeignKey(related_name='metrado_3', to='metrados.Metrado2')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Metrado4',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('codigo', models.CharField(max_length=25)),\n ('descripcion', models.TextField()),\n ('metrado3', models.ForeignKey(related_name='metrado_4', to='metrados.Metrado3')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='fichatecnica',\n name='metrado1',\n field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado1'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='fichatecnica',\n name='metrado2',\n field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado2'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='fichatecnica',\n name='metrado3',\n field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado3'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='fichatecnica',\n name='metrado4',\n field=models.ForeignKey(related_name='ficha_tecnica', to='metrados.Metrado4'),\n preserve_default=True,\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Developed by Lorenzo Mambretti, Justin Wang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/jtwwang/hanabi/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied
#
import rl_env
import numpy as np
import os
import sys
import random
import getopt
import pickle
from agents.neuroEvo_agent import NeuroEvoAgent
from predictors.conv_pred import conv_pred
# To find local modules
sys.path.insert(0, os.path.join(os.getcwd(), 'agents'))


def model_crossover(weights1, weights2):

    new_weights = []
    assert len(weights1) == len(weights2)
    if random.uniform(0, 1) > 0.3:
        print("crossover")
        for layer in range(len(weights1)):
            # alternate odd and even layers
            if layer % 2 == 0:
                new_weights.append(weights1[layer])
            else:
                new_weights.append(weights2[layer])
    else:
        print("no crossover")
        new_weights = weights1

    return new_weights


def mutate_weights(weights):
    for xi in range(len(weights)):
        for yi in range(len(weights[xi])):
            if random.uniform(0, 1) > 0.9:
                change = random.uniform(-0.1, 0.1)
                weights[xi][yi] += change
    return weights


def make_mutation(ix_to_mutate, best_ones):

    p = np.sort(scores)[2:]
    p = p / np.sum(p)

    # select the weights from parents
    randomA = np.random.choice(best_ones, p=p)
    randomB = np.random.choice(best_ones, p=p)
    while randomB == randomA:
        randomB = np.random.choice(best_ones, p=p)
    weights1 = weights[randomA]
    weights2 = weights[randomB]

    # generate new weights
    new_weights = model_crossover(weights1, weights2)
    new_weights = mutate_weights(new_weights)

    # change the weights of the target agent
    weights[ix_to_mutate] = new_weights


def run(ix, initialize=False):

    # initialize env
    env = rl_env.make('Hanabi-Full', num_players=flags['players'])
    agent_config = {
        'players': flags['players'],
        'num_moves': env.num_moves(),
        'observation_size': env.vectorized_observation_shape()[0],
        'model_name': str(ix),
        'initialize': initialize}

    agent = NeuroEvoAgent(agent_config)

    avg_reward = 0
    avg_steps = 0

    for eps in range(flags['num_episodes']):
        obs = env.reset()  # Observation of all players
        done = False
        agent_id = 0

        while not done:
            ob = obs['player_observations'][agent_id]

            try:
                action = agent.act(ob)
            except ValueError:
                print('Something went wrong. Try to reinitialize the agents'
                      'pool by using --initialize True')
                exit()

            obs, reward, done, _ = env.step(action)

            avg_reward += reward
            avg_steps += 1

            if done:
                break

            # change player
            agent_id = (agent_id + 1) % flags['players']

    n_eps = float(flags['num_episodes'])
    avg_steps /= n_eps
    avg_reward /= n_eps

    agent.save(model_name=str(ix))
    scores[ix] = avg_reward * 1000 + avg_steps


if __name__ == "__main__":

    global flags, scores, weights
    flags = {'players': 2,
             'num_episodes': 100,
             'initialize': False,
             'models': 20,
             'generations': 100}

    options, arguments = getopt.getopt(sys.argv[1:], '',
                                       ['players=',
                                        'num_episodes=',
                                        'initialize=',
                                        'models=',
                                        'generations='])
    if arguments:
        sys.exit('usage: neuroEvo.py [options]\n'
                 '--players       number of players in the game.\n'
                 '--num_episodes  number of game episodes to run.\n'
                 '--initialize    whether to re-initialize the weights'
                 'for all agents.\n')

    for flag, value in options:
        flag = flag[2:]  # Strip leading --.
        flags[flag] = type(flags[flag])(value)

    # Initialize all models
    current_pool = []
    scores = np.zeros(flags['models'])
    weights = {}
    to_mutate = 0

    # create one agent
    agent = conv_pred("NeuroEvo_agent")

    # load the file
    filepath = os.path.join("model", "NeuroEvo_agent")
    filepath = os.path.join(filepath, "scores.pickle")
    if not flags['initialize']:
        try:
            scores = pickle.load(open(filepath, "rb"))
            loaded = True
        except IOError:
            loaded = False
    else:
        loaded = False

    print("Initialize")
    # do an initial loop to evaluate all models
    for i in range(flags['models']):
        if flags['initialize'] or not loaded:
            run(i, flags['initialize'])
        agent.load(model_name=str(i))
        weights[i] = agent.model.get_weights()

    for gen in range(flags['generations']):

        print("Generation %i " % gen)

        # sort the results
        ranking = np.argsort(scores)
        print("best: %i with score %f" % (ranking[-1], scores[ranking[-1]]))
        print("worst: %i with score %f" % (ranking[0], scores[ranking[0]]))
        print("avg: %f" % (sum(scores)/flags['models']))

        # divide worst from best
        worst_ones = ranking[:2]
        best_ones = ranking[2:]

        # select the one to mutate and the one to use for the simulation
        ix_to_mutate = worst_ones[to_mutate]
        ix_to_simulate = worst_ones[1 - to_mutate]

        run(ix_to_simulate)
        make_mutation(ix_to_mutate, best_ones)

        # update weights of mutated agent
        agent.model.set_weights(weights[ix_to_mutate])
        agent.save(model_name=str(ix_to_mutate))

        # prepare for next generation
        to_mutate = (to_mutate + 1) % 2

        # save the rankings
        pickle.dump(scores, open(filepath, "wb"))
        print("Saved scores.")
normal
{ "blob_id": "bbd5eb1f80843efdd2709aa19a65bf325a88f473", "index": 8856, "step-1": "<mask token>\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\ndef run(ix, initialize=False):\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\n agent_config = {'players': flags['players'], 'num_moves': env.num_moves\n (), 'observation_size': env.vectorized_observation_shape()[0],\n 'model_name': str(ix), 'initialize': initialize}\n agent = NeuroEvoAgent(agent_config)\n avg_reward = 0\n avg_steps = 0\n for eps in range(flags['num_episodes']):\n obs = env.reset()\n done = False\n agent_id = 0\n while not done:\n ob = obs['player_observations'][agent_id]\n try:\n action = agent.act(ob)\n except ValueError:\n print(\n 'Something went wrong. 
Try to reinitialize the agentspool by using --initialize True'\n )\n exit()\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_steps += 1\n if done:\n break\n agent_id = (agent_id + 1) % flags['players']\n n_eps = float(flags['num_episodes'])\n avg_steps /= n_eps\n avg_reward /= n_eps\n agent.save(model_name=str(ix))\n scores[ix] = avg_reward * 1000 + avg_steps\n\n\n<mask token>\n", "step-3": "<mask token>\nsys.path.insert(0, os.path.join(os.getcwd(), 'agents'))\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\ndef run(ix, initialize=False):\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\n agent_config = {'players': flags['players'], 'num_moves': env.num_moves\n (), 'observation_size': env.vectorized_observation_shape()[0],\n 'model_name': str(ix), 'initialize': initialize}\n agent = NeuroEvoAgent(agent_config)\n avg_reward = 0\n avg_steps = 0\n for eps in range(flags['num_episodes']):\n obs = env.reset()\n done = False\n agent_id = 0\n while not done:\n ob = obs['player_observations'][agent_id]\n try:\n action = agent.act(ob)\n except ValueError:\n print(\n 'Something went wrong. 
Try to reinitialize the agentspool by using --initialize True'\n )\n exit()\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_steps += 1\n if done:\n break\n agent_id = (agent_id + 1) % flags['players']\n n_eps = float(flags['num_episodes'])\n avg_steps /= n_eps\n avg_reward /= n_eps\n agent.save(model_name=str(ix))\n scores[ix] = avg_reward * 1000 + avg_steps\n\n\nif __name__ == '__main__':\n global flags, scores, weights\n flags = {'players': 2, 'num_episodes': 100, 'initialize': False,\n 'models': 20, 'generations': 100}\n options, arguments = getopt.getopt(sys.argv[1:], '', ['players=',\n 'num_episodes=', 'initialize=', 'models=', 'generations='])\n if arguments:\n sys.exit(\n \"\"\"usage: neuroEvo.py [options]\n--players number of players in the game.\n--num_episodes number of game episodes to run.\n--initialize whether to re-initialize the weightsfor all agents.\n\"\"\"\n )\n for flag, value in options:\n flag = flag[2:]\n flags[flag] = type(flags[flag])(value)\n current_pool = []\n scores = np.zeros(flags['models'])\n weights = {}\n to_mutate = 0\n agent = conv_pred('NeuroEvo_agent')\n filepath = os.path.join('model', 'NeuroEvo_agent')\n filepath = os.path.join(filepath, 'scores.pickle')\n if not flags['initialize']:\n try:\n scores = pickle.load(open(filepath, 'rb'))\n loaded = True\n except IOError:\n loaded = False\n else:\n loaded = False\n print('Initialize')\n for i in range(flags['models']):\n if flags['initialize'] or not loaded:\n run(i, flags['initialize'])\n agent.load(model_name=str(i))\n weights[i] = agent.model.get_weights()\n for gen in range(flags['generations']):\n print('Generation %i ' % gen)\n ranking = np.argsort(scores)\n print('best: %i with score %f' % (ranking[-1], scores[ranking[-1]]))\n print('worst: %i with score %f' % (ranking[0], scores[ranking[0]]))\n print('avg: %f' % (sum(scores) / flags['models']))\n worst_ones = ranking[:2]\n best_ones = ranking[2:]\n ix_to_mutate = worst_ones[to_mutate]\n ix_to_simulate = worst_ones[1 - to_mutate]\n run(ix_to_simulate)\n make_mutation(ix_to_mutate, best_ones)\n agent.model.set_weights(weights[ix_to_mutate])\n agent.save(model_name=str(ix_to_mutate))\n to_mutate = (to_mutate + 1) % 2\n pickle.dump(scores, open(filepath, 'wb'))\n print('Saved scores.')\n", "step-4": "import rl_env\nimport numpy as np\nimport os\nimport sys\nimport random\nimport getopt\nimport pickle\nfrom agents.neuroEvo_agent import NeuroEvoAgent\nfrom predictors.conv_pred import conv_pred\nsys.path.insert(0, os.path.join(os.getcwd(), 'agents'))\n\n\ndef model_crossover(weights1, weights2):\n new_weights = []\n assert len(weights1) == len(weights2)\n if random.uniform(0, 1) > 0.3:\n print('crossover')\n for layer in range(len(weights1)):\n if layer % 2 == 0:\n new_weights.append(weights1[layer])\n else:\n new_weights.append(weights2[layer])\n else:\n print('no crossover')\n new_weights = weights1\n return new_weights\n\n\ndef mutate_weights(weights):\n for xi in range(len(weights)):\n for yi in range(len(weights[xi])):\n if random.uniform(0, 1) > 0.9:\n change = random.uniform(-0.1, 0.1)\n weights[xi][yi] += change\n return weights\n\n\ndef make_mutation(ix_to_mutate, best_ones):\n p = np.sort(scores)[2:]\n p = p / np.sum(p)\n randomA = np.random.choice(best_ones, p=p)\n randomB = np.random.choice(best_ones, p=p)\n while randomB == randomA:\n randomB = np.random.choice(best_ones, p=p)\n weights1 = weights[randomA]\n weights2 = weights[randomB]\n new_weights = model_crossover(weights1, weights2)\n new_weights = 
mutate_weights(new_weights)\n weights[ix_to_mutate] = new_weights\n\n\ndef run(ix, initialize=False):\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\n agent_config = {'players': flags['players'], 'num_moves': env.num_moves\n (), 'observation_size': env.vectorized_observation_shape()[0],\n 'model_name': str(ix), 'initialize': initialize}\n agent = NeuroEvoAgent(agent_config)\n avg_reward = 0\n avg_steps = 0\n for eps in range(flags['num_episodes']):\n obs = env.reset()\n done = False\n agent_id = 0\n while not done:\n ob = obs['player_observations'][agent_id]\n try:\n action = agent.act(ob)\n except ValueError:\n print(\n 'Something went wrong. Try to reinitialize the agentspool by using --initialize True'\n )\n exit()\n obs, reward, done, _ = env.step(action)\n avg_reward += reward\n avg_steps += 1\n if done:\n break\n agent_id = (agent_id + 1) % flags['players']\n n_eps = float(flags['num_episodes'])\n avg_steps /= n_eps\n avg_reward /= n_eps\n agent.save(model_name=str(ix))\n scores[ix] = avg_reward * 1000 + avg_steps\n\n\nif __name__ == '__main__':\n global flags, scores, weights\n flags = {'players': 2, 'num_episodes': 100, 'initialize': False,\n 'models': 20, 'generations': 100}\n options, arguments = getopt.getopt(sys.argv[1:], '', ['players=',\n 'num_episodes=', 'initialize=', 'models=', 'generations='])\n if arguments:\n sys.exit(\n \"\"\"usage: neuroEvo.py [options]\n--players number of players in the game.\n--num_episodes number of game episodes to run.\n--initialize whether to re-initialize the weightsfor all agents.\n\"\"\"\n )\n for flag, value in options:\n flag = flag[2:]\n flags[flag] = type(flags[flag])(value)\n current_pool = []\n scores = np.zeros(flags['models'])\n weights = {}\n to_mutate = 0\n agent = conv_pred('NeuroEvo_agent')\n filepath = os.path.join('model', 'NeuroEvo_agent')\n filepath = os.path.join(filepath, 'scores.pickle')\n if not flags['initialize']:\n try:\n scores = pickle.load(open(filepath, 'rb'))\n loaded = True\n except IOError:\n loaded = False\n else:\n loaded = False\n print('Initialize')\n for i in range(flags['models']):\n if flags['initialize'] or not loaded:\n run(i, flags['initialize'])\n agent.load(model_name=str(i))\n weights[i] = agent.model.get_weights()\n for gen in range(flags['generations']):\n print('Generation %i ' % gen)\n ranking = np.argsort(scores)\n print('best: %i with score %f' % (ranking[-1], scores[ranking[-1]]))\n print('worst: %i with score %f' % (ranking[0], scores[ranking[0]]))\n print('avg: %f' % (sum(scores) / flags['models']))\n worst_ones = ranking[:2]\n best_ones = ranking[2:]\n ix_to_mutate = worst_ones[to_mutate]\n ix_to_simulate = worst_ones[1 - to_mutate]\n run(ix_to_simulate)\n make_mutation(ix_to_mutate, best_ones)\n agent.model.set_weights(weights[ix_to_mutate])\n agent.save(model_name=str(ix_to_mutate))\n to_mutate = (to_mutate + 1) % 2\n pickle.dump(scores, open(filepath, 'wb'))\n print('Saved scores.')\n", "step-5": "# Developed by Lorenzo Mambretti, Justin Wang\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# https://github.com/jtwwang/hanabi/blob/master/LICENSE\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied\r\n#\r\nimport rl_env\r\nimport numpy as np\r\nimport 
os\r\nimport sys\r\nimport random\r\nimport getopt\r\nimport pickle\r\nfrom agents.neuroEvo_agent import NeuroEvoAgent\r\nfrom predictors.conv_pred import conv_pred\r\n# To find local modules\r\nsys.path.insert(0, os.path.join(os.getcwd(), 'agents'))\r\n\r\n\r\ndef model_crossover(weights1, weights2):\r\n\r\n new_weights = []\r\n assert len(weights1) == len(weights2)\r\n if random.uniform(0, 1) > 0.3:\r\n print(\"crossover\")\r\n for layer in range(len(weights1)):\r\n # alternate odd and even layers\r\n if layer % 2 == 0:\r\n new_weights.append(weights1[layer])\r\n else:\r\n new_weights.append(weights2[layer])\r\n else:\r\n print(\"no crossover\")\r\n new_weights = weights1\r\n\r\n return new_weights\r\n\r\n\r\ndef mutate_weights(weights):\r\n for xi in range(len(weights)):\r\n for yi in range(len(weights[xi])):\r\n if random.uniform(0, 1) > 0.9:\r\n change = random.uniform(-0.1, 0.1)\r\n weights[xi][yi] += change\r\n return weights\r\n\r\n\r\ndef make_mutation(ix_to_mutate, best_ones):\r\n\r\n p = np.sort(scores)[2:]\r\n p = p / np.sum(p)\r\n\r\n # select the weights from parents\r\n randomA = np.random.choice(best_ones, p=p)\r\n randomB = np.random.choice(best_ones, p=p)\r\n while randomB == randomA:\r\n randomB = np.random.choice(best_ones, p=p)\r\n weights1 = weights[randomA]\r\n weights2 = weights[randomB]\r\n\r\n # generate new weights\r\n new_weights = model_crossover(weights1, weights2)\r\n new_weights = mutate_weights(new_weights)\r\n\r\n # change the weights of the target agent\r\n weights[ix_to_mutate] = new_weights\r\n\r\n\r\ndef run(ix, initialize=False):\r\n\r\n # initialize env\r\n env = rl_env.make('Hanabi-Full', num_players=flags['players'])\r\n agent_config = {\r\n 'players': flags['players'],\r\n 'num_moves': env.num_moves(),\r\n 'observation_size': env.vectorized_observation_shape()[0],\r\n 'model_name': str(ix),\r\n 'initialize': initialize}\r\n\r\n agent = NeuroEvoAgent(agent_config)\r\n\r\n avg_reward = 0\r\n avg_steps = 0\r\n\r\n for eps in range(flags['num_episodes']):\r\n obs = env.reset() # Observation of all players\r\n done = False\r\n agent_id = 0\r\n\r\n while not done:\r\n ob = obs['player_observations'][agent_id]\r\n\r\n try:\r\n action = agent.act(ob)\r\n except ValueError:\r\n print('Something went wrong. 
Try to reinitialize the agents'\r\n 'pool by using --initialize True')\r\n exit()\r\n\r\n obs, reward, done, _ = env.step(action)\r\n\r\n avg_reward += reward\r\n avg_steps += 1\r\n\r\n if done:\r\n break\r\n\r\n # change player\r\n agent_id = (agent_id + 1) % flags['players']\r\n\r\n n_eps = float(flags['num_episodes'])\r\n avg_steps /= n_eps\r\n avg_reward /= n_eps\r\n\r\n agent.save(model_name=str(ix))\r\n scores[ix] = avg_reward * 1000 + avg_steps\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n global flags, scores, weights\r\n flags = {'players': 2,\r\n 'num_episodes': 100,\r\n 'initialize': False,\r\n 'models': 20,\r\n 'generations': 100}\r\n\r\n options, arguments = getopt.getopt(sys.argv[1:], '',\r\n ['players=',\r\n 'num_episodes=',\r\n 'initialize=',\r\n 'models=',\r\n 'generations='])\r\n if arguments:\r\n sys.exit('usage: neuroEvo.py [options]\\n'\r\n '--players number of players in the game.\\n'\r\n '--num_episodes number of game episodes to run.\\n'\r\n '--initialize whether to re-initialize the weights'\r\n 'for all agents.\\n')\r\n\r\n for flag, value in options:\r\n flag = flag[2:] # Strip leading --.\r\n flags[flag] = type(flags[flag])(value)\r\n\r\n # Initialize all models\r\n current_pool = []\r\n scores = np.zeros(flags['models'])\r\n weights = {}\r\n to_mutate = 0\r\n\r\n # create one agent\r\n agent = conv_pred(\"NeuroEvo_agent\")\r\n\r\n # load the file\r\n filepath = os.path.join(\"model\", \"NeuroEvo_agent\")\r\n filepath = os.path.join(filepath, \"scores.pickle\")\r\n if not flags['initialize']:\r\n try:\r\n scores = pickle.load(open(filepath, \"rb\"))\r\n loaded = True\r\n except IOError:\r\n loaded = False\r\n else:\r\n loaded = False\r\n\r\n print(\"Initialize\")\r\n # do an initial loop to evaluate all models\r\n for i in range(flags['models']):\r\n if flags['initialize'] or not loaded:\r\n run(i, flags['initialize'])\r\n agent.load(model_name=str(i))\r\n weights[i] = agent.model.get_weights()\r\n\r\n for gen in range(flags['generations']):\r\n\r\n print(\"Generation %i \" % gen)\r\n\r\n # sort the results\r\n ranking = np.argsort(scores)\r\n print(\"best: %i with score %f\" % (ranking[-1], scores[ranking[-1]]))\r\n print(\"worst: %i with score %f\" % (ranking[0], scores[ranking[0]]))\r\n print(\"avg: %f\" % (sum(scores)/flags['models']))\r\n\r\n # divide worst from best\r\n worst_ones = ranking[:2]\r\n best_ones = ranking[2:]\r\n\r\n # select the one to mutate and the one to use for the simulation\r\n ix_to_mutate = worst_ones[to_mutate]\r\n ix_to_simulate = worst_ones[1 - to_mutate]\r\n\r\n run(ix_to_simulate)\r\n make_mutation(ix_to_mutate, best_ones)\r\n\r\n # update weights of mutated agent\r\n agent.model.set_weights(weights[ix_to_mutate])\r\n agent.save(model_name=str(ix_to_mutate))\r\n\r\n # prepare for next generation\r\n to_mutate = (to_mutate + 1) % 2\r\n\r\n # save the rankings\r\n pickle.dump(scores, open(filepath, \"wb\"))\r\n print(\"Saved scores.\")\r\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
import sqlite3
import pandas as pd
#%matplotlib inline
import matplotlib.pyplot as plt

db_filename = 'readonly/dinofunworld.db'
conn = sqlite3.connect(db_filename)
c = conn.cursor()
c.execute("SELECT a.Name, count(c.visitorID) \
FROM attraction as a, checkin c \
WHERE \
a.AttractionID = c.attraction \
AND a.Category like 'Thrill Rides%' \
GROUP BY a.AttractionID \
")
thrillRidesVisitsResult = c.fetchall()
print(thrillRidesVisitsResult)
thrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult, columns=['ride_name', 'visits_count'])
c.close()
plt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)
plt.axis('equal')
plt.show()
normal
{ "blob_id": "c19c3f580d7555379bd7e077b0264a3784179e93", "index": 696, "step-1": "<mask token>\n", "step-2": "<mask token>\nc.execute(\n \"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID \"\n )\n<mask token>\nprint(thrillRidesVisitsResult)\n<mask token>\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=\n thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()\n", "step-3": "<mask token>\ndb_filename = 'readonly/dinofunworld.db'\nconn = sqlite3.connect(db_filename)\nc = conn.cursor()\nc.execute(\n \"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID \"\n )\nthrillRidesVisitsResult = c.fetchall()\nprint(thrillRidesVisitsResult)\nthrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,\n columns=['ride_name', 'visits_count'])\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=\n thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()\n", "step-4": "import sqlite3\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndb_filename = 'readonly/dinofunworld.db'\nconn = sqlite3.connect(db_filename)\nc = conn.cursor()\nc.execute(\n \"SELECT a.Name, count(c.visitorID) FROM attraction as a, checkin c WHERE a.AttractionID = c.attraction AND a.Category like 'Thrill Rides%' GROUP BY a.AttractionID \"\n )\nthrillRidesVisitsResult = c.fetchall()\nprint(thrillRidesVisitsResult)\nthrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult,\n columns=['ride_name', 'visits_count'])\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=\n thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()\n", "step-5": "import sqlite3\nimport pandas as pd\n#%matplotlib inline\nimport matplotlib.pyplot as plt\n\ndb_filename = 'readonly/dinofunworld.db'\nconn = sqlite3.connect(db_filename)\nc = conn.cursor()\nc.execute(\"SELECT a.Name, count(c.visitorID) \\\nFROM attraction as a, checkin c \\\nWHERE \\\na.AttractionID = c.attraction \\\nAND a.Category like 'Thrill Rides%' \\\nGROUP BY a.AttractionID \\\n\")\nthrillRidesVisitsResult = c.fetchall()\nprint(thrillRidesVisitsResult)\nthrillRidesVisitsDataFrame = pd.DataFrame.from_records(thrillRidesVisitsResult, columns=['ride_name', 'visits_count'])\nc.close()\nplt.pie(thrillRidesVisitsDataFrame['visits_count'], labels=thrillRidesVisitsDataFrame['ride_name'], autopct='%1.1f%%', shadow=False)\nplt.axis('equal')\nplt.show()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import random


consonants = [
    'b', 'c', 'd', 'f', 'g',
    'h', 'j', 'k', 'l', 'm',
    'n', 'p', 'q', 'r', 's',
    't', 'v', 'w', 'x', 'y',
    'z'
]
vowels = [
    'a', 'e', 'i', 'o', 'u'
]

def make_word(user_input):
    word = ""

    for letter in user_input:
        letter = letter.lower()
        if letter == 'c':
            word += random.choice(consonants)
        elif letter == 'v':
            word += random.choice(vowels)
        elif letter.isspace():
            word += ' '
        else:
            print('Incorrect character passed. You must supply either a [c]onsonant, or a [v]owel\n')
    return word

def main():
    pattern = input('Enter your lexical pattern, c for consonant. v for vowel\n')
    print(make_word(pattern))

main()
normal
{ "blob_id": "a4f4137b9310ebc68515b9cae841051eda1f0360", "index": 3522, "step-1": "<mask token>\n\n\ndef make_word(user_input):\n word = ''\n for letter in user_input:\n letter = letter.lower()\n if letter == 'c':\n word += random.choice(consonants)\n elif letter == 'v':\n word += random.choice(vowels)\n elif letter.isspace():\n word += ' '\n else:\n print(\n \"\"\"Incorrect character passed. You must supply either a [c]onsonant, or a [vowel]\n\"\"\"\n )\n return word\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef make_word(user_input):\n word = ''\n for letter in user_input:\n letter = letter.lower()\n if letter == 'c':\n word += random.choice(consonants)\n elif letter == 'v':\n word += random.choice(vowels)\n elif letter.isspace():\n word += ' '\n else:\n print(\n \"\"\"Incorrect character passed. You must supply either a [c]onsonant, or a [vowel]\n\"\"\"\n )\n return word\n\n\ndef main():\n pattern = input(\n 'Enter your lexical pattern, c for consonant. v for vowel\\n')\n print(make_word(pattern))\n\n\n<mask token>\n", "step-3": "<mask token>\nconsonants = ['b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'p',\n 'q', 'r', 's', 't', 'v', 'w', 'x', 'y', 'z']\nvowels = ['a', 'e', ' i', 'o', 'u']\n\n\ndef make_word(user_input):\n word = ''\n for letter in user_input:\n letter = letter.lower()\n if letter == 'c':\n word += random.choice(consonants)\n elif letter == 'v':\n word += random.choice(vowels)\n elif letter.isspace():\n word += ' '\n else:\n print(\n \"\"\"Incorrect character passed. You must supply either a [c]onsonant, or a [vowel]\n\"\"\"\n )\n return word\n\n\ndef main():\n pattern = input(\n 'Enter your lexical pattern, c for consonant. v for vowel\\n')\n print(make_word(pattern))\n\n\nmain()\n", "step-4": "import random\nconsonants = ['b', 'c', 'd', 'f', 'g', 'h', 'j', 'k', 'l', 'm', 'n', 'p',\n 'q', 'r', 's', 't', 'v', 'w', 'x', 'y', 'z']\nvowels = ['a', 'e', ' i', 'o', 'u']\n\n\ndef make_word(user_input):\n word = ''\n for letter in user_input:\n letter = letter.lower()\n if letter == 'c':\n word += random.choice(consonants)\n elif letter == 'v':\n word += random.choice(vowels)\n elif letter.isspace():\n word += ' '\n else:\n print(\n \"\"\"Incorrect character passed. You must supply either a [c]onsonant, or a [vowel]\n\"\"\"\n )\n return word\n\n\ndef main():\n pattern = input(\n 'Enter your lexical pattern, c for consonant. v for vowel\\n')\n print(make_word(pattern))\n\n\nmain()\n", "step-5": "import random\n\n\nconsonants = [\n 'b', 'c', 'd', 'f', 'g',\n 'h', 'j', 'k', 'l', 'm',\n 'n', 'p', 'q', 'r', 's',\n 't', 'v', 'w', 'x', 'y',\n 'z'\n]\nvowels = [\n 'a', 'e',' i', 'o', 'u'\n]\n\ndef make_word(user_input):\n word = \"\"\n\n for letter in user_input:\n letter = letter.lower()\n if letter == 'c':\n word += random.choice(consonants)\n elif letter == 'v':\n word += random.choice(vowels)\n elif letter.isspace():\n word += ' '\n else :\n print('Incorrect character passed. You must supply either a [c]onsonant, or a [vowel]\\n')\n return word\n\ndef main():\n pattern = input('Enter your lexical pattern, c for consonant. v for vowel\\n')\n print(make_word(pattern))\n\nmain()\n", "step-ids": [ 1, 2, 4, 5, 6 ] }
[ 1, 2, 4, 5, 6 ]
class Solution:
    # @param num, a list of integer
    # @return an integer
    def longestConsecutive(self, num):
        sted = {}
        n = len(num)
        for item in num:
            if item in sted:
                continue
            sted[item] = item
            if item-1 in sted:
                sted[item] = sted[item-1]
                sted[sted[item-1]] = item
            if item+1 in sted:
                tmp = sted[item+1]
                sted[tmp] = sted[item]
                sted[sted[item]] = tmp

        res = 0
        for item in sted:
            res = max(res, sted[item] - item)
        return res + 1
normal
{ "blob_id": "d7c4bee7245dab1cbb90ee68b8e99994ce7dd219", "index": 3295, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n", "step-3": "class Solution:\n\n def longestConsecutive(self, num):\n sted = {}\n n = len(num)\n for item in num:\n if item in sted:\n continue\n sted[item] = item\n if item - 1 in sted:\n sted[item] = sted[item - 1]\n sted[sted[item - 1]] = item\n if item + 1 in sted:\n tmp = sted[item + 1]\n sted[tmp] = sted[item]\n sted[sted[item]] = tmp\n res = 0\n for item in sted:\n res = max(res, sted[item] - item)\n return res + 1\n", "step-4": "class Solution:\n # @param num, a list of integer\n # @return an integer\n def longestConsecutive(self, num):\n sted = {}\n n = len(num)\n for item in num:\n if item in sted:\n continue\n sted[item] = item\n if item-1 in sted:\n sted[item] = sted[item-1]\n sted[sted[item-1]] = item\n if item+1 in sted:\n tmp = sted[item+1]\n sted[tmp] = sted[item]\n sted[sted[item]] = tmp\n \n res = 0\n for item in sted:\n res = max(res, sted[item] - item)\n return res + 1", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from classNinapro import Ninapro
import numpy as np
import tensorflow as tf
print(tf.__version__)

Debug = True  # for tensor dimensionality checking

ninapro = Ninapro()
ninapro.splitImagesLabels()

# Train
print('ninapro.TrainImages shape: ', ninapro.TrainImages.shape)  # m x 16 x 30
print('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape)  # m x 8
# Test
print('ninapro.TestImages shape: ', ninapro.TestImages.shape)  # m x 16 x 30
print('ninapro.TestLabels shape: ', ninapro.TestLabels.shape)  # m x 8
# Validate
print('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape)  # m x 16 x 30
print('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape)  # m x 8
print('Read successfully done...')

# number of total classes of movements, 8 for example.
nMV = ninapro.TrainLabels.shape[1]

# - build the Convolutional Neural Network
#-------------------------------------------------add Full+Dropout+Fully

# Setup placeholders for input data
with tf.name_scope('Input'):
    x = tf.placeholder(tf.float32, shape=[None, 16, 30], name='X')
    y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')
    if Debug:
        print('input x shape: ', x.shape)
        print('input y shape: ', y.shape)

# every sample with the dimensionality, 16x30
x_image = tf.reshape(x, [-1, 16, 30, 1])
if Debug:
    print('x_image shape: ', x_image.shape)

# summary
#tf.summary.image('input', x, 4)

firstIn = 1
firstOut = 32
with tf.name_scope('First'):
    # convolution
    w1 = tf.Variable(tf.truncated_normal([1, 16, firstIn, firstOut], stddev=0.1), name='W')
    b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name='B')
    s1 = 1
    conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME')
    act1 = tf.nn.relu(conv1 + b1)

    # summary
    tf.summary.histogram('weights', w1)
    tf.summary.histogram('biases', b1)
    tf.summary.histogram('activation', act1)

    # dimensionality checking
    if Debug:
        print('w1 shape: ', w1.shape)
        print('b1 shape: ', b1.shape)
        print('conv1 shape: ', conv1.shape)
        print('act1 shape: ', act1.shape)

secondIn = firstOut
secondOut = 32
with tf.name_scope('Second'):
    # convolution
    w2 = tf.Variable(tf.truncated_normal([3, 3, secondIn, secondOut], stddev=0.1), name='W')
    b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')
    s2 = 1
    conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')
    # detector
    act2 = tf.nn.relu(conv2 + b2)
    # maxpooling
    k2 = 3
    ms2 = 1
    mp2 = tf.nn.max_pool(act2, ksize=[1, k2, k2, 1], strides=[1, ms2, ms2, 1], padding='SAME')

    # summary
    tf.summary.histogram('weights', w2)
    tf.summary.histogram('biases', b2)
    tf.summary.histogram('activation', act2)
    tf.summary.histogram('maxpooling', mp2)

    # dimensionality checking
    if Debug:
        print('w2 shape: ', w2.shape)
        print('b2 shape: ', b2.shape)
        print('conv2 shape: ', conv2.shape)
        print('act2 shape: ', act2.shape)
        print('mp2 shape: ', mp2.shape)

thirdIn = secondOut
thirdOut = 64
with tf.name_scope('Third'):
    # convolution
    w3 = tf.Variable(tf.truncated_normal([5, 5, thirdIn, thirdOut], stddev=0.1), name='W')
    b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')
    s3 = 1
    conv3 = tf.nn.conv2d(mp2, w3, strides=[1, s3, s3, 1], padding='SAME')
    # detector
    act3 = tf.nn.relu(conv3 + b3)
    # maxpooling
    k3 = 3   # ksize of maxpooling
    ms3 = 1  # maxpooling stride
    mp3 = tf.nn.max_pool(act3, ksize=[1, k3, k3, 1], strides=[1, ms3, ms3, 1], padding='SAME')

    # summary
    tf.summary.histogram('weights', w3)
    tf.summary.histogram('biases', b3)
    tf.summary.histogram('activation', act3)
    tf.summary.histogram('maxpooling', mp3)

    # dimensionality checking
    if Debug:
        print('w3 shape: ', w3.shape)
        print('b3 shape: ', b3.shape)
        print('conv3 shape: ', conv3.shape)
        print('act3 shape: ', act3.shape)
        print('mp3 shape: ', mp3.shape)

fourthIn = thirdOut
fourthOut = 64
with tf.name_scope('Fourth'):
    # convolution
    w4 = tf.Variable(tf.truncated_normal([6, 1, fourthIn, fourthOut], stddev=0.1), name='W')
    b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')
    s4 = 1
    conv4 = tf.nn.conv2d(mp3, w4, strides=[1, s4, s4, 1], padding='SAME')
    # detector
    act4 = tf.nn.relu(conv4 + b4)

    # summary
    tf.summary.histogram('weights', w4)
    tf.summary.histogram('biases', b4)
    tf.summary.histogram('activation', act4)

    # dimensionality checking
    if Debug:
        print('w4 shape: ', w4.shape)
        print('b4 shape: ', b4.shape)
        print('conv4 shape: ', conv4.shape)
        print('act4 shape: ', act4.shape)

fifthIn = fourthOut
fifthOut = 8
with tf.name_scope('Fifth'):
    # convolution
    w5 = tf.Variable(tf.truncated_normal([1, 1, fifthIn, fifthOut], stddev=0.1), name='W')
    b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')
    s5 = 1
    conv5 = tf.nn.conv2d(act4, w5, strides=[1, s5, s5, 1], padding='SAME')
    # detector
    act5 = tf.nn.relu(conv5 + b5)
    # flatten
    with tf.name_scope('Flatten'):
        flatten5 = tf.reshape(act5, [-1, 16*30*fifthOut])
    # fully-connect layer
    with tf.name_scope('FullyCon'):
        wfc5 = tf.Variable(tf.truncated_normal([16*30*fifthOut, nMV], stddev=0.1), name='W')
        bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')
        y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)

    # summary
    tf.summary.histogram('weights', w5)
    tf.summary.histogram('biases', b5)
    tf.summary.histogram('activation', act5)
    tf.summary.histogram('flatten', flatten5)
    tf.summary.histogram('weights_fc5', wfc5)
    tf.summary.histogram('biases_fc5', bfc5)

    # dimensionality checking
    if Debug:
        print('w5 shape: ', w5.shape)
        print('b5 shape: ', b5.shape)
        print('conv5 shape: ', conv5.shape)
        print('act5 shape: ', act5.shape)
        print('flatten5 shape: ', flatten5.shape)
        print('weights_fc5 shape: ', wfc5.shape)
        print('biases_fc5 shape: ', bfc5.shape)
        print('y_predict shape: ', y_.shape)

with tf.name_scope('Softmaxloss'):
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y), name='Loss')
    # summary
    tf.summary.scalar('cross_entropy', cross_entropy)

with tf.name_scope('Accuracy'):
    correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # summary
    tf.summary.scalar('accuracy', accuracy)

# Use an AdamOptimizer to train the network
train = tf.train.AdamOptimizer(1e-1).minimize(cross_entropy)

# Visualization directory
graph_dir = 'sEMGCNN'
import usefulFcns
usefulFcns.BuildNewlyDir(graph_dir)

# Train the model
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    merged_summary = tf.summary.merge_all()
    writer = tf.summary.FileWriter(graph_dir)
    writer.add_graph(sess.graph)

    for i in range(2000):
        x_batch, y_batch = ninapro.next_batch(30)
        # Occasionally report accuracy of [train] and [test]
        if i % 100 == 0:
            [train_accuracy] = sess.run([accuracy], feed_dict={x: x_batch, y: y_batch})
            [test_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.TestImages, y: ninapro.TestLabels})
            [validate_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.ValidateImages, y: ninapro.ValidateLabels})
            print('Step %d, training %g, testing %g, validate %g.' % (i, train_accuracy, test_accuracy, validate_accuracy))
        # Occasionally write visualization summary to disk file.
        if i % 5 == 0:
            s = sess.run(merged_summary, feed_dict={x: x_batch, y: y_batch})
            writer.add_summary(s, i)
        # Training the model
        sess.run(train, feed_dict={x: x_batch, y: y_batch})
normal
{ "blob_id": "30aa8405ccf64ce8a05204f3f9fa2ffab436ad3b", "index": 1578, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(tf.__version__)\n<mask token>\nninapro.splitImagesLabels()\nprint('ninapro.TrainImages shape: ', ninapro.TrainImages.shape)\nprint('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape)\nprint('ninapro.TestImages shape: ', ninapro.TestImages.shape)\nprint('ninapro.TestLabels shape: ', ninapro.TestLabels.shape)\nprint('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape)\nprint('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape)\nprint('Read successfully done...')\n<mask token>\nwith tf.name_scope('Input'):\n x = tf.placeholder(tf.float32, shape=[None, 16, 30], name='X')\n y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')\n if Debug:\n print('input x shape: ', x.shape)\n print('input y shape: ', y.shape)\n<mask token>\nif Debug:\n print('x_image shape: ', x_image.shape)\n<mask token>\nwith tf.name_scope('First'):\n w1 = tf.Variable(tf.truncated_normal([1, 16, firstIn, firstOut], stddev\n =0.1), name='W')\n b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name='B')\n s1 = 1\n conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME')\n act1 = tf.nn.relu(conv1 + b1)\n tf.summary.histogram('weights', w1)\n tf.summary.histogram('biases', b1)\n tf.summary.histogram('activation', act1)\n if Debug:\n print('w1 shape: ', w1.shape)\n print('b1 shape: ', b1.shape)\n print('conv1 shape: ', conv1.shape)\n print('act1 shape: ', act1.shape)\n<mask token>\nwith tf.name_scope('Second'):\n w2 = tf.Variable(tf.truncated_normal([3, 3, secondIn, secondOut],\n stddev=0.1), name='W')\n b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')\n s2 = 1\n conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')\n act2 = tf.nn.relu(conv2 + b2)\n k2 = 3\n ms2 = 1\n mp2 = tf.nn.max_pool(act2, ksize=[1, k2, k2, 1], strides=[1, ms2, ms2, \n 1], padding='SAME')\n tf.summary.histogram('weights', w2)\n tf.summary.histogram('biases', b2)\n tf.summary.histogram('activation', act2)\n tf.summary.histogram('maxpooling', mp2)\n if Debug:\n print('w2 shape: ', w2.shape)\n print('b2 shape: ', b2.shape)\n print('conv2 shape: ', conv2.shape)\n print('act2 shape: ', act2.shape)\n print('mp2 shape: ', mp2.shape)\n<mask token>\nwith tf.name_scope('Third'):\n w3 = tf.Variable(tf.truncated_normal([5, 5, thirdIn, thirdOut], stddev=\n 0.1), name='W')\n b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')\n s3 = 1\n conv3 = tf.nn.conv2d(mp2, w3, strides=[1, s3, s3, 1], padding='SAME')\n act3 = tf.nn.relu(conv3 + b3)\n k3 = 3\n ms3 = 1\n mp3 = tf.nn.max_pool(act3, ksize=[1, k3, k3, 1], strides=[1, ms3, ms3, \n 1], padding='SAME')\n tf.summary.histogram('weights', w3)\n tf.summary.histogram('biases', b3)\n tf.summary.histogram('activation', act3)\n tf.summary.histogram('maxpooling', mp3)\n if Debug:\n print('w3 shape: ', w3.shape)\n print('b3 shape: ', b3.shape)\n print('conv3 shape: ', conv3.shape)\n print('act3 shape: ', act3.shape)\n print('mp3 shape: ', mp3.shape)\n<mask token>\nwith tf.name_scope('Fourth'):\n w4 = tf.Variable(tf.truncated_normal([6, 1, fourthIn, fourthOut],\n stddev=0.1), name='W')\n b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')\n s4 = 1\n conv4 = tf.nn.conv2d(mp3, w4, strides=[1, s4, s4, 1], padding='SAME')\n act4 = tf.nn.relu(conv4 + b4)\n tf.summary.histogram('weights', w4)\n tf.summary.histogram('biases', b4)\n tf.summary.histogram('activation', act4)\n if Debug:\n print('w4 shape: 
', w4.shape)\n print('b4 shape: ', b4.shape)\n print('conv4 shape: ', conv4.shape)\n print('act4 shape: ', act4.shape)\n<mask token>\nwith tf.name_scope('Fifth'):\n w5 = tf.Variable(tf.truncated_normal([1, 1, fifthIn, fifthOut], stddev=\n 0.1), name='W')\n b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')\n s5 = 1\n conv5 = tf.nn.conv2d(act4, w5, strides=[1, s5, s5, 1], padding='SAME')\n act5 = tf.nn.relu(conv5 + b5)\n with tf.name_scope('Flatten'):\n flatten5 = tf.reshape(act5, [-1, 16 * 30 * fifthOut])\n with tf.name_scope('FullyCon'):\n wfc5 = tf.Variable(tf.truncated_normal([16 * 30 * fifthOut, nMV],\n stddev=0.1), name='W')\n bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')\n y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)\n tf.summary.histogram('weights', w5)\n tf.summary.histogram('biases', b5)\n tf.summary.histogram('activation', act5)\n tf.summary.histogram('flatten', flatten5)\n tf.summary.histogram('weights_fc5', wfc5)\n tf.summary.histogram('biases_fc5', bfc5)\n if Debug:\n print('w5 shape: ', w5.shape)\n print('b5 shape: ', b5.shape)\n print('conv5 shape: ', conv5.shape)\n print('act5 shape: ', act5.shape)\n print('flatten5 shape: ', flatten5.shape)\n print('weights_fc5 shape: ', wfc5.shape)\n print('biases_fc5 shape: ', bfc5.shape)\n print('y_predict shape: ', y_.shape)\nwith tf.name_scope('Softmaxloss'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=y_, labels=y), name='Loss')\n tf.summary.scalar('cross_entropy', cross_entropy)\nwith tf.name_scope('Accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\n<mask token>\nusefulFcns.BuildNewlyDir(graph_dir)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(graph_dir)\n writer.add_graph(sess.graph)\n for i in range(2000):\n x_batch, y_batch = ninapro.next_batch(30)\n if i % 100 == 0:\n [train_accuracy] = sess.run([accuracy], feed_dict={x: x_batch,\n y: y_batch})\n [test_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.\n TestImages, y: ninapro.TestLabels})\n [validate_accuracy] = sess.run([accuracy], feed_dict={x:\n ninapro.ValidateImages, y: ninapro.ValidateLabels})\n print('Step %d, training %g, testing %g, validate %g.' 
% (i,\n train_accuracy, test_accuracy, validate_accuracy))\n if i % 5 == 0:\n s = sess.run(merged_summary, feed_dict={x: x_batch, y: y_batch})\n writer.add_summary(s, i)\n sess.run(train, feed_dict={x: x_batch, y: y_batch})\n", "step-3": "<mask token>\nprint(tf.__version__)\nDebug = True\nninapro = Ninapro()\nninapro.splitImagesLabels()\nprint('ninapro.TrainImages shape: ', ninapro.TrainImages.shape)\nprint('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape)\nprint('ninapro.TestImages shape: ', ninapro.TestImages.shape)\nprint('ninapro.TestLabels shape: ', ninapro.TestLabels.shape)\nprint('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape)\nprint('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape)\nprint('Read successfully done...')\nnMV = ninapro.TrainLabels.shape[1]\nwith tf.name_scope('Input'):\n x = tf.placeholder(tf.float32, shape=[None, 16, 30], name='X')\n y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')\n if Debug:\n print('input x shape: ', x.shape)\n print('input y shape: ', y.shape)\nx_image = tf.reshape(x, [-1, 16, 30, 1])\nif Debug:\n print('x_image shape: ', x_image.shape)\nfirstIn = 1\nfirstOut = 32\nwith tf.name_scope('First'):\n w1 = tf.Variable(tf.truncated_normal([1, 16, firstIn, firstOut], stddev\n =0.1), name='W')\n b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name='B')\n s1 = 1\n conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME')\n act1 = tf.nn.relu(conv1 + b1)\n tf.summary.histogram('weights', w1)\n tf.summary.histogram('biases', b1)\n tf.summary.histogram('activation', act1)\n if Debug:\n print('w1 shape: ', w1.shape)\n print('b1 shape: ', b1.shape)\n print('conv1 shape: ', conv1.shape)\n print('act1 shape: ', act1.shape)\nsecondIn = firstOut\nsecondOut = 32\nwith tf.name_scope('Second'):\n w2 = tf.Variable(tf.truncated_normal([3, 3, secondIn, secondOut],\n stddev=0.1), name='W')\n b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')\n s2 = 1\n conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')\n act2 = tf.nn.relu(conv2 + b2)\n k2 = 3\n ms2 = 1\n mp2 = tf.nn.max_pool(act2, ksize=[1, k2, k2, 1], strides=[1, ms2, ms2, \n 1], padding='SAME')\n tf.summary.histogram('weights', w2)\n tf.summary.histogram('biases', b2)\n tf.summary.histogram('activation', act2)\n tf.summary.histogram('maxpooling', mp2)\n if Debug:\n print('w2 shape: ', w2.shape)\n print('b2 shape: ', b2.shape)\n print('conv2 shape: ', conv2.shape)\n print('act2 shape: ', act2.shape)\n print('mp2 shape: ', mp2.shape)\nthirdIn = secondOut\nthirdOut = 64\nwith tf.name_scope('Third'):\n w3 = tf.Variable(tf.truncated_normal([5, 5, thirdIn, thirdOut], stddev=\n 0.1), name='W')\n b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')\n s3 = 1\n conv3 = tf.nn.conv2d(mp2, w3, strides=[1, s3, s3, 1], padding='SAME')\n act3 = tf.nn.relu(conv3 + b3)\n k3 = 3\n ms3 = 1\n mp3 = tf.nn.max_pool(act3, ksize=[1, k3, k3, 1], strides=[1, ms3, ms3, \n 1], padding='SAME')\n tf.summary.histogram('weights', w3)\n tf.summary.histogram('biases', b3)\n tf.summary.histogram('activation', act3)\n tf.summary.histogram('maxpooling', mp3)\n if Debug:\n print('w3 shape: ', w3.shape)\n print('b3 shape: ', b3.shape)\n print('conv3 shape: ', conv3.shape)\n print('act3 shape: ', act3.shape)\n print('mp3 shape: ', mp3.shape)\nfourthIn = thirdOut\nfourthOut = 64\nwith tf.name_scope('Fourth'):\n w4 = tf.Variable(tf.truncated_normal([6, 1, fourthIn, fourthOut],\n stddev=0.1), name='W')\n b4 = tf.Variable(tf.constant(0.1, 
shape=[fourthOut]), name='B')\n s4 = 1\n conv4 = tf.nn.conv2d(mp3, w4, strides=[1, s4, s4, 1], padding='SAME')\n act4 = tf.nn.relu(conv4 + b4)\n tf.summary.histogram('weights', w4)\n tf.summary.histogram('biases', b4)\n tf.summary.histogram('activation', act4)\n if Debug:\n print('w4 shape: ', w4.shape)\n print('b4 shape: ', b4.shape)\n print('conv4 shape: ', conv4.shape)\n print('act4 shape: ', act4.shape)\nfifthIn = fourthOut\nfifthOut = 8\nwith tf.name_scope('Fifth'):\n w5 = tf.Variable(tf.truncated_normal([1, 1, fifthIn, fifthOut], stddev=\n 0.1), name='W')\n b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')\n s5 = 1\n conv5 = tf.nn.conv2d(act4, w5, strides=[1, s5, s5, 1], padding='SAME')\n act5 = tf.nn.relu(conv5 + b5)\n with tf.name_scope('Flatten'):\n flatten5 = tf.reshape(act5, [-1, 16 * 30 * fifthOut])\n with tf.name_scope('FullyCon'):\n wfc5 = tf.Variable(tf.truncated_normal([16 * 30 * fifthOut, nMV],\n stddev=0.1), name='W')\n bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')\n y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)\n tf.summary.histogram('weights', w5)\n tf.summary.histogram('biases', b5)\n tf.summary.histogram('activation', act5)\n tf.summary.histogram('flatten', flatten5)\n tf.summary.histogram('weights_fc5', wfc5)\n tf.summary.histogram('biases_fc5', bfc5)\n if Debug:\n print('w5 shape: ', w5.shape)\n print('b5 shape: ', b5.shape)\n print('conv5 shape: ', conv5.shape)\n print('act5 shape: ', act5.shape)\n print('flatten5 shape: ', flatten5.shape)\n print('weights_fc5 shape: ', wfc5.shape)\n print('biases_fc5 shape: ', bfc5.shape)\n print('y_predict shape: ', y_.shape)\nwith tf.name_scope('Softmaxloss'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=y_, labels=y), name='Loss')\n tf.summary.scalar('cross_entropy', cross_entropy)\nwith tf.name_scope('Accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\ntrain = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)\ngraph_dir = 'sEMGCNN'\n<mask token>\nusefulFcns.BuildNewlyDir(graph_dir)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(graph_dir)\n writer.add_graph(sess.graph)\n for i in range(2000):\n x_batch, y_batch = ninapro.next_batch(30)\n if i % 100 == 0:\n [train_accuracy] = sess.run([accuracy], feed_dict={x: x_batch,\n y: y_batch})\n [test_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.\n TestImages, y: ninapro.TestLabels})\n [validate_accuracy] = sess.run([accuracy], feed_dict={x:\n ninapro.ValidateImages, y: ninapro.ValidateLabels})\n print('Step %d, training %g, testing %g, validate %g.' 
% (i,\n train_accuracy, test_accuracy, validate_accuracy))\n if i % 5 == 0:\n s = sess.run(merged_summary, feed_dict={x: x_batch, y: y_batch})\n writer.add_summary(s, i)\n sess.run(train, feed_dict={x: x_batch, y: y_batch})\n", "step-4": "from classNinapro import Ninapro\nimport numpy as np\nimport tensorflow as tf\nprint(tf.__version__)\nDebug = True\nninapro = Ninapro()\nninapro.splitImagesLabels()\nprint('ninapro.TrainImages shape: ', ninapro.TrainImages.shape)\nprint('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape)\nprint('ninapro.TestImages shape: ', ninapro.TestImages.shape)\nprint('ninapro.TestLabels shape: ', ninapro.TestLabels.shape)\nprint('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape)\nprint('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape)\nprint('Read successfully done...')\nnMV = ninapro.TrainLabels.shape[1]\nwith tf.name_scope('Input'):\n x = tf.placeholder(tf.float32, shape=[None, 16, 30], name='X')\n y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')\n if Debug:\n print('input x shape: ', x.shape)\n print('input y shape: ', y.shape)\nx_image = tf.reshape(x, [-1, 16, 30, 1])\nif Debug:\n print('x_image shape: ', x_image.shape)\nfirstIn = 1\nfirstOut = 32\nwith tf.name_scope('First'):\n w1 = tf.Variable(tf.truncated_normal([1, 16, firstIn, firstOut], stddev\n =0.1), name='W')\n b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name='B')\n s1 = 1\n conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME')\n act1 = tf.nn.relu(conv1 + b1)\n tf.summary.histogram('weights', w1)\n tf.summary.histogram('biases', b1)\n tf.summary.histogram('activation', act1)\n if Debug:\n print('w1 shape: ', w1.shape)\n print('b1 shape: ', b1.shape)\n print('conv1 shape: ', conv1.shape)\n print('act1 shape: ', act1.shape)\nsecondIn = firstOut\nsecondOut = 32\nwith tf.name_scope('Second'):\n w2 = tf.Variable(tf.truncated_normal([3, 3, secondIn, secondOut],\n stddev=0.1), name='W')\n b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')\n s2 = 1\n conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')\n act2 = tf.nn.relu(conv2 + b2)\n k2 = 3\n ms2 = 1\n mp2 = tf.nn.max_pool(act2, ksize=[1, k2, k2, 1], strides=[1, ms2, ms2, \n 1], padding='SAME')\n tf.summary.histogram('weights', w2)\n tf.summary.histogram('biases', b2)\n tf.summary.histogram('activation', act2)\n tf.summary.histogram('maxpooling', mp2)\n if Debug:\n print('w2 shape: ', w2.shape)\n print('b2 shape: ', b2.shape)\n print('conv2 shape: ', conv2.shape)\n print('act2 shape: ', act2.shape)\n print('mp2 shape: ', mp2.shape)\nthirdIn = secondOut\nthirdOut = 64\nwith tf.name_scope('Third'):\n w3 = tf.Variable(tf.truncated_normal([5, 5, thirdIn, thirdOut], stddev=\n 0.1), name='W')\n b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')\n s3 = 1\n conv3 = tf.nn.conv2d(mp2, w3, strides=[1, s3, s3, 1], padding='SAME')\n act3 = tf.nn.relu(conv3 + b3)\n k3 = 3\n ms3 = 1\n mp3 = tf.nn.max_pool(act3, ksize=[1, k3, k3, 1], strides=[1, ms3, ms3, \n 1], padding='SAME')\n tf.summary.histogram('weights', w3)\n tf.summary.histogram('biases', b3)\n tf.summary.histogram('activation', act3)\n tf.summary.histogram('maxpooling', mp3)\n if Debug:\n print('w3 shape: ', w3.shape)\n print('b3 shape: ', b3.shape)\n print('conv3 shape: ', conv3.shape)\n print('act3 shape: ', act3.shape)\n print('mp3 shape: ', mp3.shape)\nfourthIn = thirdOut\nfourthOut = 64\nwith tf.name_scope('Fourth'):\n w4 = tf.Variable(tf.truncated_normal([6, 1, fourthIn, fourthOut],\n 
stddev=0.1), name='W')\n b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')\n s4 = 1\n conv4 = tf.nn.conv2d(mp3, w4, strides=[1, s4, s4, 1], padding='SAME')\n act4 = tf.nn.relu(conv4 + b4)\n tf.summary.histogram('weights', w4)\n tf.summary.histogram('biases', b4)\n tf.summary.histogram('activation', act4)\n if Debug:\n print('w4 shape: ', w4.shape)\n print('b4 shape: ', b4.shape)\n print('conv4 shape: ', conv4.shape)\n print('act4 shape: ', act4.shape)\nfifthIn = fourthOut\nfifthOut = 8\nwith tf.name_scope('Fifth'):\n w5 = tf.Variable(tf.truncated_normal([1, 1, fifthIn, fifthOut], stddev=\n 0.1), name='W')\n b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')\n s5 = 1\n conv5 = tf.nn.conv2d(act4, w5, strides=[1, s5, s5, 1], padding='SAME')\n act5 = tf.nn.relu(conv5 + b5)\n with tf.name_scope('Flatten'):\n flatten5 = tf.reshape(act5, [-1, 16 * 30 * fifthOut])\n with tf.name_scope('FullyCon'):\n wfc5 = tf.Variable(tf.truncated_normal([16 * 30 * fifthOut, nMV],\n stddev=0.1), name='W')\n bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')\n y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)\n tf.summary.histogram('weights', w5)\n tf.summary.histogram('biases', b5)\n tf.summary.histogram('activation', act5)\n tf.summary.histogram('flatten', flatten5)\n tf.summary.histogram('weights_fc5', wfc5)\n tf.summary.histogram('biases_fc5', bfc5)\n if Debug:\n print('w5 shape: ', w5.shape)\n print('b5 shape: ', b5.shape)\n print('conv5 shape: ', conv5.shape)\n print('act5 shape: ', act5.shape)\n print('flatten5 shape: ', flatten5.shape)\n print('weights_fc5 shape: ', wfc5.shape)\n print('biases_fc5 shape: ', bfc5.shape)\n print('y_predict shape: ', y_.shape)\nwith tf.name_scope('Softmaxloss'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(\n logits=y_, labels=y), name='Loss')\n tf.summary.scalar('cross_entropy', cross_entropy)\nwith tf.name_scope('Accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.summary.scalar('accuracy', accuracy)\ntrain = tf.train.AdamOptimizer(0.1).minimize(cross_entropy)\ngraph_dir = 'sEMGCNN'\nimport usefulFcns\nusefulFcns.BuildNewlyDir(graph_dir)\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(graph_dir)\n writer.add_graph(sess.graph)\n for i in range(2000):\n x_batch, y_batch = ninapro.next_batch(30)\n if i % 100 == 0:\n [train_accuracy] = sess.run([accuracy], feed_dict={x: x_batch,\n y: y_batch})\n [test_accuracy] = sess.run([accuracy], feed_dict={x: ninapro.\n TestImages, y: ninapro.TestLabels})\n [validate_accuracy] = sess.run([accuracy], feed_dict={x:\n ninapro.ValidateImages, y: ninapro.ValidateLabels})\n print('Step %d, training %g, testing %g, validate %g.' 
% (i,\n train_accuracy, test_accuracy, validate_accuracy))\n if i % 5 == 0:\n s = sess.run(merged_summary, feed_dict={x: x_batch, y: y_batch})\n writer.add_summary(s, i)\n sess.run(train, feed_dict={x: x_batch, y: y_batch})\n", "step-5": "from classNinapro import Ninapro\nimport numpy as np\n\nimport tensorflow as tf\nprint(tf.__version__)\n\nDebug = True # for tensor dimensionality checking\nninapro = Ninapro()\nninapro.splitImagesLabels()\n\n# Train\nprint('ninapro.TrainImages shape: ', ninapro.TrainImages.shape) # m x 16 x 30\nprint('ninapro.TrainLabels shape: ', ninapro.TrainLabels.shape) # m x 8\n# Test\nprint('ninapro.TestImages shape: ', ninapro.TestImages.shape) # m x 16 x 30\nprint('ninapro.TestLabels shape: ', ninapro.TestLabels.shape) # m x 8\n# Validate\nprint('ninapro.ValidateImages shape: ', ninapro.ValidateImages.shape) # m x 16 x 30\nprint('ninapro.ValidateLabels shape: ', ninapro.ValidateLabels.shape) # m x 8\n\nprint('Read successfully done...')\n\n# number of total classes of movements, 8 for exampel.\nnMV = ninapro.TrainLabels.shape[1]\n\n# - build the Convolutional Neural Network\n#-------------------------------------------------add Full+Dropout+Fully\n\n# Setup placeholders for input data\n\nwith tf.name_scope('Input'):\n x = tf.placeholder(tf.float32, shape=[None, 16,30], name='X')\n y = tf.placeholder(tf.float32, shape=[None, nMV], name='Labels')\n\n if Debug:\n print('input x shape: ', x.shape)\n print('input y shape: ', y.shape)\n\n# every sample with the dimensionality, 16x30\nx_image = tf.reshape(x, [-1, 16, 30, 1])\nif Debug:\n print('x_image shape: ', x_image.shape)\n\n# summary \n#tf.summary.image('input', x, 4)\n\n\nfirstIn = 1\nfirstOut = 32\nwith tf.name_scope('First'):\n # convolution\n w1 = tf.Variable(tf.truncated_normal([1,16, firstIn, firstOut], stddev=0.1), name = 'W')\n b1 = tf.Variable(tf.constant(0.1, shape=[firstOut]), name = 'B' )\n s1 = 1\n conv1 = tf.nn.conv2d(x_image, w1, strides=[1, s1, s1, 1], padding='SAME' )\n act1 = tf.nn.relu(conv1 + b1)\n # summary\n tf.summary.histogram('weights', w1)\n tf.summary.histogram('biases', b1)\n tf.summary.histogram('activation', act1) \n\n # dimensionality checking\n if Debug:\n print('w1 shape: ', w1.shape)\n print('b1 shape: ', b1.shape)\n print('conv1 shape: ', conv1.shape)\n print('act1 shape: ', act1.shape)\n\n\nsecondIn = firstOut\nsecondOut = 32\nwith tf.name_scope('Second'):\n # convolution\n w2 = tf.Variable(tf.truncated_normal([3,3, secondIn, secondOut], stddev=0.1), name='W')\n b2 = tf.Variable(tf.constant(0.1, shape=[secondOut]), name='B')\n s2 = 1\n conv2 = tf.nn.conv2d(act1, w2, strides=[1, s2, s2, 1], padding='SAME')\n # detector\n act2 = tf.nn.relu(conv2 + b2)\n # maxpooling\n k2 = 3\n ms2 = 1\n mp2 = tf.nn.max_pool(act2, ksize=[1, k2,k2, 1], strides=[1,ms2,ms2,1], padding='SAME')\n # summary\n tf.summary.histogram('weights', w2)\n tf.summary.histogram('biases', b2)\n tf.summary.histogram('activation', act2)\n tf.summary.histogram('maxpooling', mp2)\n\n # dimensionality checking\n if Debug:\n print('w2 shape: ', w2.shape)\n print('b2 shape: ', b2.shape)\n print('conv2 shape: ', conv2.shape)\n print('act2 shape: ', act2.shape)\n print('mp2 shape: ', mp2.shape)\n\nthirdIn = secondOut\nthirdOut = 64\nwith tf.name_scope('Third'):\n # convolution\n w3 = tf.Variable(tf.truncated_normal([5,5, thirdIn, thirdOut], stddev=0.1), name='W')\n b3 = tf.Variable(tf.constant(0.1, shape=[thirdOut]), name='B')\n s3 = 1\n conv3 = tf.nn.conv2d(mp2, w3, strides=[1,s3,s3,1], padding='SAME')\n # detector\n act3 = 
tf.nn.relu(conv3 + b3)\n # maxpooling\n k3 = 3 # ksize of maxpooling\n ms3 = 1 # maxpooling stride = 3\n mp3 = tf.nn.max_pool(act3, ksize=[1,k3,k3,1], strides=[1, ms3, ms3, 1], padding='SAME')\n\n # summary\n tf.summary.histogram('weights', w3)\n tf.summary.histogram('biases', b3)\n tf.summary.histogram('activation', act3)\n tf.summary.histogram('maxpooling', mp3)\n\n # dimensionality checking\n if Debug:\n print('w3 shape: ', w3.shape)\n print('b3 shape: ', b3.shape)\n print('conv3 shape: ', conv3.shape)\n print('act3 shape: ', act3.shape)\n print('mp3 shape: ', mp3.shape)\n\n\nfourthIn = thirdOut\nfourthOut = 64\nwith tf.name_scope('Fourth'):\n # convolution\n w4 = tf.Variable(tf.truncated_normal([6,1, fourthIn, fourthOut], stddev=0.1), name='W')\n b4 = tf.Variable(tf.constant(0.1, shape=[fourthOut]), name='B')\n s4 = 1\n conv4 = tf.nn.conv2d(mp3, w4, strides=[1,s4,s4,1], padding='SAME')\n # detector\n act4 = tf.nn.relu(conv4 + b4)\n \n # summary\n tf.summary.histogram('weights', w4)\n tf.summary.histogram('biases', b4)\n tf.summary.histogram('activation', act4)\n\n # dimensionality checking\n if Debug:\n print('w4 shape: ', w4.shape)\n print('b4 shape: ', b4.shape)\n print('conv4 shape: ', conv4.shape)\n print('act4 shape: ', act4.shape)\n\nfifthIn = fourthOut\nfifthOut = 8\nwith tf.name_scope('Fifth'):\n # convolution\n w5 = tf.Variable(tf.truncated_normal([1,1, fifthIn, fifthOut], stddev=0.1), name='W')\n b5 = tf.Variable(tf.constant(0.1, shape=[fifthOut]), name='B')\n s5 = 1\n conv5 = tf.nn.conv2d(act4, w5, strides=[1,s5,s5,1], padding='SAME')\n # detector\n act5 = tf.nn.relu(conv5 + b5)\n\n # flatten\n with tf.name_scope('Flatten'):\n flatten5 = tf.reshape(act5, [-1, 16*30*fifthOut])\n # fully-connect layer\n with tf.name_scope('FullyCon'):\n wfc5 = tf.Variable(tf.truncated_normal( [16*30*fifthOut, nMV], stddev=0.1), name='W')\n bfc5 = tf.Variable(tf.constant(0.1, shape=[nMV]), name='B')\n y_ = tf.nn.relu(tf.matmul(flatten5, wfc5) + bfc5)\n\n # summary\n tf.summary.histogram('weights', w5)\n tf.summary.histogram('biases', b5)\n tf.summary.histogram('activation', act5)\n tf.summary.histogram('flatten', flatten5)\n tf.summary.histogram('weights_fc5', wfc5)\n tf.summary.histogram('biases_fc5', bfc5)\n\n\n # dimensionality checking\n if Debug:\n print('w5 shape: ', w5.shape)\n print('b5 shape: ', b5.shape)\n print('conv5 shape: ', conv5.shape)\n print('act5 shape: ', act5.shape)\n print('flatten5 shape: ', flatten5.shape)\n print('weights_fc5 shape: ', wfc5.shape)\n print('biases_fc5 shape: ', bfc5.shape)\n print('y_predict shape: ', y_.shape)\n\n\nwith tf.name_scope('Softmaxloss'):\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y), name='Loss')\n # summary\n tf.summary.scalar('cross_entropy', cross_entropy)\n \n\nwith tf.name_scope('Accuracy'):\n correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n # summary\n tf.summary.scalar('accuracy', accuracy)\n\n# Use an AdamOptimizer to train the network\ntrain = tf.train.AdamOptimizer(1e-1).minimize(cross_entropy)\n\n# Visualization directory\ngraph_dir = 'sEMGCNN'\nimport usefulFcns\nusefulFcns.BuildNewlyDir(graph_dir)\n\n# Train the model\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n merged_summary = tf.summary.merge_all()\n writer = tf.summary.FileWriter(graph_dir)\n writer.add_graph(sess.graph)\n\n for i in range(2000):\n x_batch, y_batch = ninapro.next_batch(30)\n\n # Occasionaly 
report accuracy of [train] and [test]\n if i%100==0:\n [train_accuracy] = sess.run([accuracy], feed_dict={x:x_batch, y:y_batch})\n [test_accuracy] = sess.run([accuracy], feed_dict={x:ninapro.TestImages, y:ninapro.TestLabels})\n [validate_accuracy] = sess.run([accuracy], feed_dict={x:ninapro.ValidateImages, y:ninapro.ValidateLabels} )\n print('Step %d, training %g, testing %g, validate %g.' % (i, train_accuracy, test_accuracy, validate_accuracy) )\n \n # Occasionaly write visualization summary to disk file.\n if i%5==0:\n s = sess.run(merged_summary, feed_dict={x:x_batch, y:y_batch})\n writer.add_summary(s,i)\n # Training the model\n sess.run(train, feed_dict={x:x_batch, y:y_batch})\n \n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#connect4_JayNa.py
#Jay Na
#CS111 Spring 2018
#This file creates a version of the game Connect4, where the user plays against an AI

from graphics import *
import random


class ConnectWindow:

    def __init__(self):
        self.window = GraphWin("Connect Four", 690, 590)
        self.window.setMouseHandler(self.handleClick)
        self.startScreen()
        self.currentUser = 1
        self.limitCounter = 0

    def startScreen(self):
        '''This function creates the board and initializes the board count for each column'''

        # draws blue rectangle as the background
        self.background = Rectangle(Point(0, 0), Point(690, 590))
        self.background.setFill('blue')
        self.background.draw(self.window)

        # draws white circles to represent the spots for the game
        for i in range(7):
            for j in range(6):
                self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)
                self.Circles.setFill('white')
                self.Circles.draw(self.window)

        # draws lines to separate circles in rectangle
        for i in range(6):
            self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * 100 + 100))
            self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + 100, 900))
            self.horizLine.draw(self.window)
            self.vertLine.draw(self.window)

        # initiates counts for each column and creates grid
        self.grid = [[], [], [], [], [], [], []]
        self.boardCount = [0, 0, 0, 0, 0, 0, 0]
        counter = 2

        # help from CS Major, Joh Farmer
        for x in range(7):
            for y in range(6):
                self.grid[x].append(counter)
                counter += 1

    def validClick(self, x):
        '''This function checks if there is enough space vertically for move to be valid'''

        if self.boardCount[x] >= 6:
            print("Invalid Move")
            return False
        else:
            return True

    def drawUmove(self):
        '''This function prints the pieces onto the board at the given position from the user'''

        piece = Circle(Point(self.x * 100 + 50, 600 - (self.y * 100 + 50)), 30)
        piece.setFill('red')
        piece.draw(self.window)
        return

    def handleClick(self, point):
        '''This function works with the user to add each move into the board count and to the current grid'''

        self.newX = point.getX()
        self.x = self.newX // 100
        self.y = self.boardCount[self.x]

        if self.validClick(self.x):
            self.boardCount[self.x] += 1
            self.limitCounter += 1
            self.grid[self.x][self.y] = self.currentUser

        if self.isWon() == False:
            self.limitCounter += 1
            self.computerMove()
            self.drawUmove()

    def isWon(self):
        '''This function checks if there is a winner in the game (True/False) and calls printWinner function'''

        # checks to see if there is a winner vertically
        for i in range(7):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i][j + 1]
                self.square3 = self.grid[i][j + 2]
                self.square4 = self.grid[i][j + 3]
                if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
                    self.printWinner(self.square1)
                    return True

        # checks to see if there is a winner diagonally from lower left to upper right
        for i in range(4):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i + 1][j + 1]
                self.square3 = self.grid[i + 2][j + 2]
                self.square4 = self.grid[i + 3][j + 3]
                if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
                    self.printWinner(self.square1)
                    return True

        # checks to see if there is a winner diagonally from upper left to lower right
        for i in range(3, 7):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i - 1][j + 1]
                self.square3 = self.grid[i - 2][j + 2]
                self.square4 = self.grid[i - 3][j + 3]
                if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
                    self.printWinner(self.square1)
                    return True

        # checks to see if there is a winner horizontally
        for i in range(4):
            for j in range(6):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i + 1][j]
                self.square3 = self.grid[i + 2][j]
                self.square4 = self.grid[i + 3][j]
                if self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:
                    self.printWinner(self.square1)
                    return True

        # checks if board is full without a winner (tie)
        if self.limitCounter == 42:
            self.printWinner(3)
            return True
        return False

    def printWinner(self, winner):
        '''This function prints who the winner is or if it is a tie'''

        # if input is 3 from isWon() fxn, game is tied and so "Tie Game!" is printed
        if winner == 3:
            txt = Text(Point(345, 300), "Tie Game!")
            txt.setFill('white')
            txt.setSize(35)
            txt.draw(self.window)
            return
        else:
            # prints "You Won!" if user wins
            if winner == 1:
                txt = Text(Point(345, 300), "You Won!")
                txt.setFill('white')
                txt.setSize(35)
                txt.draw(self.window)
                return
            else:
                # prints "Computer Won!" if computer wins
                txt = Text(Point(345, 300), "Computer Won!")
                txt.setFill('white')
                txt.setSize(35)
                txt.draw(self.window)
                return

    def validCmove(self, x, y):
        '''This function checks if the computer's move will be valid'''

        # invalid if it tries to place the piece above the next open row
        if self.boardCount[x] > y:
            return False
        # invalid if it tries to place the piece below the next open row
        if self.boardCount[x] < y:
            return False
        # invalid if it tries to place the piece in a column with 6 pieces already
        if self.boardCount[x] >= 6:
            return False
        else:
            return True

    def drawCmove(self, x, y):
        '''This function adds the computer's move to the game board and adds it to the board count'''

        piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)
        piece.setFill('yellow')
        piece.draw(self.window)
        self.boardCount[x] += 1
        self.grid[x][y] = -1
        return

    def computerMove(self):
        '''This function computes where the computer will put its next move and calls the drawCmove() fxn to do so.
        The computer will add its piece to wherever there are three in a row in either color, then looks to see where
        there are two in a row. The move will be placed randomly if no pieces are lined up in a row.'''

        # checks if there are three pieces lined up vertically in a row and places its move to win or prevent the win
        for i in range(7):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i][j + 1]
                self.square3 = self.grid[i][j + 2]
                if self.square1 == self.square2 and self.square2 == self.square3:
                    if self.validCmove(i, j + 3):
                        self.drawCmove(i, j + 3)
                        return
                    else:
                        self.randomMove()
                        return

        # checks if there are three pieces lined up diagonally from lower left to upper right and places its move to win or prevent the win
        # help from CS major, Joh Farmer
        for i in range(4):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i + 1][j + 1]
                self.square3 = self.grid[i + 2][j + 2]
                if self.square1 == self.square2 and self.square2 == self.square3:
                    if self.validCmove(i + 3, j + 3):
                        self.drawCmove(i + 3, j + 3)
                        return
                    if self.validCmove(i - 1, j - 1):
                        self.drawCmove(i - 1, j - 1)
                    else:
                        self.randomMove()
                        return

        # checks if there are three pieces lined up diagonally from lower right to upper left and places its move to win or prevent the win
        for i in range(3, 7):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i - 1][j + 1]
                self.square3 = self.grid[i - 2][j + 2]
                if self.square1 == self.square2 and self.square2 == self.square3:
                    if self.validCmove(i - 3, j + 3):
                        self.drawCmove(i - 3, j + 3)
                        return
                    if self.validCmove(i + 1, j - 1):
                        self.drawCmove(i + 1, j - 1)
                    else:
                        self.randomMove()
                        return

        # checks if there are three pieces lined up horizontally in a row and places its move to win or prevent the win (either side)
        for i in range(4):
            for j in range(6):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i + 1][j]
                self.square3 = self.grid[i + 2][j]
                if self.square1 == self.square2 and self.square2 == self.square3:
                    if self.validCmove(i + 3, j):
                        self.drawCmove(i + 3, j)
                        return
                    if self.validCmove(i - 1, j):
                        self.drawCmove(i - 1, j)
                        return
                    else:
                        self.randomMove()
                        return

        # checks if there are two in a row diagonally from lower left to upper right and places its move accordingly
        for i in range(4):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i + 1][j + 1]
                if self.square1 == self.square2:
                    if self.validCmove(i + 2, j + 2):
                        self.drawCmove(i + 2, j + 2)
                        return
                    if self.validCmove(i - 1, j - 1):
                        self.drawCmove(i - 1, j - 1)
                    else:
                        self.randomMove()
                        return

        # checks if there are two in a row vertically and places its move accordingly
        for i in range(7):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i][j + 1]
                if self.square1 == self.square2:
                    if self.validCmove(i, j + 2):
                        self.drawCmove(i, j + 2)
                        return
                    if self.validCmove(i, j - 1):
                        self.drawCmove(i, j - 1)
                        return
                    else:
                        self.randomMove()
                        return

        # checks if there are two in a row diagonally from lower right to upper left and places its move accordingly
        for i in range(3, 7):
            for j in range(3):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i - 1][j + 1]
                if self.square1 == self.square2:
                    if self.validCmove(i - 2, j + 2):
                        self.drawCmove(i - 2, j + 2)
                        return
                    if self.validCmove(i + 1, j - 1):
                        self.drawCmove(i + 1, j - 1)
                    else:
                        self.randomMove()
                        return

        # checks if there are two in a row horizontally and places its move accordingly
        for i in range(4):
            for j in range(6):
                self.square1 = self.grid[i][j]
                self.square2 = self.grid[i + 1][j]
                if self.square1 == self.square2:
                    if self.validCmove(i + 2, j):
                        self.drawCmove(i + 2, j)
                        return
                    if self.validCmove(i - 1, j):
                        self.drawCmove(i - 1, j)
                        return
                    else:
                        self.randomMove()
                        return
        # places move randomly if no pieces are being placed in a row (for-else: runs when the loop above finds nothing)
        else:
            self.randomMove()

    def randomMove(self):
        '''This function creates a random coordinate for its move, checks if it's valid, then prints the move.
        It will continue to run until numbers are valid for the current board.'''

        randY = random.randint(0, 6)
        randX = random.randint(0, 7)

        if self.validCmove(randY, randX):
            self.drawCmove(randY, randX)
            return
        else:
            self.randomMove()


def main():
    gameOver = False
    connect4 = ConnectWindow()
    while gameOver == False:
        connect4.window.getMouse()
        gameOver = connect4.isWon()
    input("Hit enter to quit")


main()
normal
{ "blob_id": "abbad57e945d2195021948a0e0838c6bfd9c6a1e", "index": 769, "step-1": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n\n def validClick(self, x):\n \"\"\"This function checks if there is enough space vertically for move to be valid\"\"\"\n if self.boardCount[x] >= 6:\n print('Invalid Move')\n return False\n else:\n return True\n <mask token>\n\n def handleClick(self, point):\n \"\"\"This function works with the user to add each move into the board count and to the current grid\"\"\"\n self.newX = point.getX()\n self.x = self.newX // 100\n self.y = self.boardCount[self.x]\n if self.validClick(self.x):\n self.boardCount[self.x] += 1\n self.limitCounter += 1\n self.grid[self.x][self.y] = self.currentUser\n if self.isWon() == False:\n self.limitCounter += 1\n self.computerMove()\n self.drawUmove()\n <mask token>\n\n def printWinner(self, winner):\n \"\"\"This function prints who the winner is or if it is a tie\"\"\"\n if winner == 3:\n txt = Text(Point(345, 300), 'Tie Game!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n elif winner == 1:\n txt = Text(Point(345, 300), 'You Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n else:\n txt = Text(Point(345, 300), 'Computer Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n\n def validCmove(self, x, y):\n \"\"\"This function checks if the computer's move will be valid\"\"\"\n if self.boardCount[x] > y:\n return False\n \"\"\" if it tries to place below the highest piece\"\"\"\n if self.boardCount[x] < y:\n return False\n \"\"\"if it tries to 
place it in a column with 6 pieces already\"\"\"\n if self.boardCount[x] >= 6:\n return False\n else:\n return True\n\n def drawCmove(self, x, y):\n \"\"\"This function adds the computer's move to the game board and adds it to the board count\"\"\"\n piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)\n piece.setFill('yellow')\n piece.draw(self.window)\n self.boardCount[x] += 1\n self.grid[x][y] = -1\n return\n <mask token>\n\n def randomMove(self):\n \"\"\"This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board\"\"\"\n randY = random.randint(0, 6)\n randX = random.randint(0, 7)\n if self.validCmove(randY, randX):\n self.drawCmove(randY, randX)\n return\n else:\n self.randomMove()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n\n def validClick(self, x):\n \"\"\"This function checks if there is enough space vertically for move to be valid\"\"\"\n if self.boardCount[x] >= 6:\n print('Invalid Move')\n return False\n else:\n return True\n\n def drawUmove(self):\n \"\"\"This function prints the pieces onto the board at the given position from the user\"\"\"\n piece = Circle(Point(self.x * 100 + 50, 600 - (self.y * 100 + 50)), 30)\n piece.setFill('red')\n piece.draw(self.window)\n return\n\n def handleClick(self, point):\n \"\"\"This function works with the user to add each move into the board count and to the current grid\"\"\"\n self.newX = point.getX()\n self.x = self.newX // 100\n self.y = self.boardCount[self.x]\n if self.validClick(self.x):\n self.boardCount[self.x] += 1\n self.limitCounter += 1\n self.grid[self.x][self.y] = self.currentUser\n if self.isWon() == False:\n self.limitCounter += 1\n self.computerMove()\n self.drawUmove()\n\n def isWon(self):\n \"\"\"This function checks if there is a winner in the game (True/False) and calls printWinner function\"\"\"\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n self.square3 = self.grid[i][j + 2]\n self.square4 = self.grid[i][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n self.square3 = self.grid[i + 2][j + 2]\n self.square4 = self.grid[i + 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(3, 7):\n for j in range(3):\n 
self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n self.square3 = self.grid[i - 2][j + 2]\n self.square4 = self.grid[i - 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n self.square3 = self.grid[i + 2][j]\n self.square4 = self.grid[i + 3][j]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n if self.limitCounter == 42:\n self.printWinner(3)\n return True\n return False\n\n def printWinner(self, winner):\n \"\"\"This function prints who the winner is or if it is a tie\"\"\"\n if winner == 3:\n txt = Text(Point(345, 300), 'Tie Game!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n elif winner == 1:\n txt = Text(Point(345, 300), 'You Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n else:\n txt = Text(Point(345, 300), 'Computer Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n\n def validCmove(self, x, y):\n \"\"\"This function checks if the computer's move will be valid\"\"\"\n if self.boardCount[x] > y:\n return False\n \"\"\" if it tries to place below the highest piece\"\"\"\n if self.boardCount[x] < y:\n return False\n \"\"\"if it tries to place it in a column with 6 pieces already\"\"\"\n if self.boardCount[x] >= 6:\n return False\n else:\n return True\n\n def drawCmove(self, x, y):\n \"\"\"This function adds the computer's move to the game board and adds it to the board count\"\"\"\n piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)\n piece.setFill('yellow')\n piece.draw(self.window)\n self.boardCount[x] += 1\n self.grid[x][y] = -1\n return\n <mask token>\n\n def randomMove(self):\n \"\"\"This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board\"\"\"\n randY = random.randint(0, 6)\n randX = random.randint(0, 7)\n if self.validCmove(randY, randX):\n self.drawCmove(randY, randX)\n return\n else:\n self.randomMove()\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass ConnectWindow:\n <mask token>\n\n def startScreen(self):\n \"\"\"This function creates the board and intializes the board count for each column\"\"\"\n self.background = Rectangle(Point(0, 0), Point(690, 590))\n self.background.setFill('blue')\n self.background.draw(self.window)\n for i in range(7):\n for j in range(6):\n self.Circles = Circle(Point(i * 100 + 50, j * 100 + 50), 30)\n self.Circles.setFill('white')\n self.Circles.draw(self.window)\n for i in range(6):\n self.horizLine = Line(Point(0, i * 100 + 100), Point(900, i * \n 100 + 100))\n self.vertLine = Line(Point(100 * i + 100, 0), Point(100 * i + \n 100, 900))\n self.horizLine.draw(self.window)\n self.vertLine.draw(self.window)\n self.grid = [[], [], [], [], [], [], []]\n self.boardCount = [0, 0, 0, 0, 0, 0, 0]\n counter = 2\n for x in range(7):\n for y in range(6):\n self.grid[x].append(counter)\n counter += 1\n\n def validClick(self, x):\n \"\"\"This function checks if there is enough space vertically for move to be valid\"\"\"\n if self.boardCount[x] >= 6:\n print('Invalid Move')\n return False\n else:\n return True\n\n def drawUmove(self):\n \"\"\"This function prints the pieces onto the 
board at the given position from the user\"\"\"\n piece = Circle(Point(self.x * 100 + 50, 600 - (self.y * 100 + 50)), 30)\n piece.setFill('red')\n piece.draw(self.window)\n return\n\n def handleClick(self, point):\n \"\"\"This function works with the user to add each move into the board count and to the current grid\"\"\"\n self.newX = point.getX()\n self.x = self.newX // 100\n self.y = self.boardCount[self.x]\n if self.validClick(self.x):\n self.boardCount[self.x] += 1\n self.limitCounter += 1\n self.grid[self.x][self.y] = self.currentUser\n if self.isWon() == False:\n self.limitCounter += 1\n self.computerMove()\n self.drawUmove()\n\n def isWon(self):\n \"\"\"This function checks if there is a winner in the game (True/False) and calls printWinner function\"\"\"\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n self.square3 = self.grid[i][j + 2]\n self.square4 = self.grid[i][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n self.square3 = self.grid[i + 2][j + 2]\n self.square4 = self.grid[i + 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(3, 7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n self.square3 = self.grid[i - 2][j + 2]\n self.square4 = self.grid[i - 3][j + 3]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n self.square3 = self.grid[i + 2][j]\n self.square4 = self.grid[i + 3][j]\n if (self.square1 == self.square2 and self.square2 == self.\n square3 and self.square3 == self.square4):\n self.printWinner(self.square1)\n return True\n if self.limitCounter == 42:\n self.printWinner(3)\n return True\n return False\n\n def printWinner(self, winner):\n \"\"\"This function prints who the winner is or if it is a tie\"\"\"\n if winner == 3:\n txt = Text(Point(345, 300), 'Tie Game!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n elif winner == 1:\n txt = Text(Point(345, 300), 'You Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n else:\n txt = Text(Point(345, 300), 'Computer Won!')\n txt.setFill('white')\n txt.setSize(35)\n txt.draw(self.window)\n return\n\n def validCmove(self, x, y):\n \"\"\"This function checks if the computer's move will be valid\"\"\"\n if self.boardCount[x] > y:\n return False\n \"\"\" if it tries to place below the highest piece\"\"\"\n if self.boardCount[x] < y:\n return False\n \"\"\"if it tries to place it in a column with 6 pieces already\"\"\"\n if self.boardCount[x] >= 6:\n return False\n else:\n return True\n\n def drawCmove(self, x, y):\n \"\"\"This function adds the computer's move to the game board and adds it to the board count\"\"\"\n piece = Circle(Point(x * 100 + 50, 600 - (y * 100 + 50)), 30)\n piece.setFill('yellow')\n piece.draw(self.window)\n self.boardCount[x] += 1\n self.grid[x][y] = -1\n return\n\n def computerMove(self):\n \"\"\"This function computes where the computer will put its next move and calls 
the drawCmove() fxn to do so.\n\t\tThe computer will add its piece to wherever there are three in a row in either color then looks to see when \n\t\tthere are two in a row. Move will be placed randomly if no pieces are placed in a row\"\"\"\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n self.square3 = self.grid[i][j + 2]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i, j + 3):\n self.drawCmove(i, j + 3)\n return\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n self.square3 = self.grid[i + 2][j + 2]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i + 3, j + 3):\n self.drawCmove(i + 3, j + 3)\n return\n if self.validCmove(i - 1, j - 1):\n self.drawCmove(i - 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(3, 7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n self.square3 = self.grid[i - 2][j + 2]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i - 3, j + 3):\n self.drawCmove(i - 3, j + 3)\n return\n if self.validCmove(i + 1, j - 1):\n self.drawCmove(i + 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n self.square3 = self.grid[i + 2][j]\n if (self.square1 == self.square2 and self.square2 == self.\n square3):\n if self.validCmove(i + 3, j):\n self.drawCmove(i + 3, j)\n return\n if self.validCmove(i - 1, j):\n self.drawCmove(i - 1, j)\n return\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j + 1]\n if self.square1 == self.square2:\n if self.validCmove(i + 2, j + 2):\n self.drawCmove(i + 2, j + 2)\n return\n if self.validCmove(i - 1, j - 1):\n self.drawCmove(i - 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i][j + 1]\n if self.square1 == self.square2:\n if self.validCmove(i, j + 2):\n self.drawCmove(i, j + 2)\n return\n if self.validCmove(i, j - 1):\n self.drawCmove(i, j - 1)\n return\n else:\n self.randomMove()\n return\n for i in range(3, 7):\n for j in range(3):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i - 1][j + 1]\n if self.square1 == self.square2:\n if self.validCmove(i - 2, j + 2):\n self.drawCmove(i - 2, j + 2)\n return\n if self.validCmove(i + 1, j - 1):\n self.drawCmove(i + 1, j - 1)\n else:\n self.randomMove()\n return\n for i in range(4):\n for j in range(6):\n self.square1 = self.grid[i][j]\n self.square2 = self.grid[i + 1][j]\n if self.square1 == self.square2:\n if self.validCmove(i + 2, j):\n self.drawCmove(i + 2, j)\n return\n if self.validCmove(i - 1, j):\n self.drawCmove(i - 1, j)\n return\n else:\n self.randomMove()\n return\n else:\n self.randomMove()\n\n def randomMove(self):\n \"\"\"This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board\"\"\"\n randY = random.randint(0, 6)\n randX = random.randint(0, 7)\n if self.validCmove(randY, randX):\n self.drawCmove(randY, randX)\n return\n else:\n self.randomMove()\n\n\n<mask token>\n", "step-5": "#connect4_JayNa.py\n#Jay 
Na\n#CS111 Spring 2018\n#This file creates a version of the game Connect4, where the user plays against an AI\n\nfrom graphics import *\nimport random\n\nclass ConnectWindow:\n\n\tdef __init__(self):\n\t\tself.window = GraphWin(\"Connect Four\", 690, 590)\n\t\tself.window.setMouseHandler(self.handleClick)\n\t\tself.startScreen()\n\t\tself.currentUser = 1\n\t\tself.limitCounter = 0\n\t\t\n\n\tdef startScreen(self):\n\t\t'''This function creates the board and intializes the board count for each column'''\n\n\t#draws blue rectangle as the background\n\t\tself.background = Rectangle(Point(0,0), Point(690,590))\n\t\tself.background.setFill('blue')\n\t\tself.background.draw(self.window)\n\t\t\n\t#draws white circles to represent the spots for the game\t\n\t\tfor i in range(7):\n\t\t\tfor j in range(6):\n\t\t\t\tself.Circles = Circle(Point(i*100+50,j*100+50),(30))\n\t\t\t\tself.Circles.setFill('white')\n\t\t\t\tself.Circles.draw(self.window)\n\t\t\t\t\n\t#draws lines to separate circles in rectangle\n\t\tfor i in range(6):\n\t\t\tself.horizLine = Line(Point(0,i*100+100), Point(900,i*100+100))\n\t\t\tself.vertLine = Line(Point(100*i+100,0), Point(100*i+100,900))\n\t\t\tself.horizLine.draw(self.window)\n\t\t\tself.vertLine.draw(self.window)\n\t\t\t\n\t#initiates counts for each column and creates grid\n\t\tself.grid = [[],[],[],[],[],[],[]]\n\t\tself.boardCount = [0,0,0,0,0,0,0]\n\t\tcounter = 2\n\t\t\n\t\t#help from CS Major, Joh Farmer\n\t\tfor x in range(7):\n\t\t\tfor y in range(6):\n\t\t\t\tself.grid[x].append(counter)\n\t\t\t\tcounter += 1\n\t\t\t\t\n\n\tdef validClick(self, x):\n\t\t'''This function checks if there is enough space vertically for move to be valid'''\n\t\t\n\t\tif self.boardCount[x] >= 6:\n\t\t\tprint(\"Invalid Move\")\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\n\tdef drawUmove(self):\n\t\t'''This function prints the pieces onto the board at the given position from the user'''\n\t\t\n\t\tpiece = Circle(Point(self.x*100+50, 600-(self.y*100+50)),30)\n\t\tpiece.setFill('red')\n\t\tpiece.draw(self.window)\n\t\treturn\n\n\tdef handleClick(self, point):\n\t\t'''This function works with the user to add each move into the board count and to the current grid'''\n\n\t\tself.newX = point.getX()\n\t\tself.x = self.newX//100\n\t\tself.y = self.boardCount[self.x]\n\t\t\n\t\tif self.validClick(self.x):\n\t\t\tself.boardCount[self.x] += 1\n\t\t\tself.limitCounter += 1\n\t\t\tself.grid[self.x][self.y] = self.currentUser\n\t\t\t\n\t\tif self.isWon() == False:\n\t\t\tself.limitCounter += 1\n\t\t\tself.computerMove()\n\t\t\tself.drawUmove()\n\n\n\tdef isWon(self):\n\t\t'''This function checks if there is a winner in the game (True/False) and calls printWinner function'''\n\n\t#checks to see if there is a winner vertically\n\t\tfor i in range(7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i][j+1]\n\t\t\t\tself.square3 = self.grid[i][j+2]\n\t\t\t\tself.square4 = self.grid[i][j+3]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:\n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\n\n\t#checks to see if there is a winner diagonally from lower left to upper right\n\t\tfor i in range(4):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j+1]\n\t\t\t\tself.square3 = self.grid[i+2][j+2]\n\t\t\t\tself.square4 = self.grid[i+3][j+3]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3 and 
self.square3 == self.square4:\n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\n\t\t\t\t\t\n\t#checks to see if there is a winner diagonally from upper left to lower right\n\t\tfor i in range(3,7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i-1][j+1]\n\t\t\t\tself.square3 = self.grid[i-2][j+2]\n\t\t\t\tself.square4 = self.grid[i-3][j+3]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4:\n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\t\t\t\n\t\t\t\t\t\n\t#checks to see if there is a winner horizontally\n\t\tfor i in range(4):\n\t\t\tfor j in range(6):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j]\n\t\t\t\tself.square3 = self.grid[i+2][j]\n\t\t\t\tself.square4 = self.grid[i+3][j]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3 and self.square3 == self.square4: \n\t\t\t\t\tself.printWinner(self.square1)\n\t\t\t\t\treturn True\t\t\t\t\n\t\t\t\t\t\n\t#checks if board is full without a winner (tie)\n\t\tif self.limitCounter == 42:\n\t\t\tself.printWinner(3)\n\t\t\treturn True\n\t\treturn False\n\n\n\tdef printWinner(self, winner):\n\t\t'''This function prints who the winner is or if it is a tie'''\n\t\t\n\t#if input is 3 from isWon() fxn, game is tied and so \"Tie Game!\" is printed \n\t\tif winner == 3:\n\t\t\ttxt = Text(Point(345, 300), \"Tie Game!\")\n\t\t\ttxt.setFill('white')\n\t\t\ttxt.setSize(35)\n\t\t\ttxt.draw(self.window)\n\t\t\treturn\t\t\n\t\telse:\n\t#prints \"You Won!\" if user wins\n\t\t\tif winner == 1:\n\t\t\t\ttxt = Text(Point(345, 300), \"You Won!\")\n\t\t\t\ttxt.setFill('white')\n\t\t\t\ttxt.setSize(35)\n\t\t\t\ttxt.draw(self.window)\n\t\t\t\treturn\n\t\t\telse:\n\t#prints \"Computer Won!\" if computer wins\n\t\t\t\ttxt = Text(Point(345, 300), \"Computer Won!\")\n\t\t\t\ttxt.setFill('white')\n\t\t\t\ttxt.setSize(35)\n\t\t\t\ttxt.draw(self.window)\n\t\t\t\treturn\n\n\n\tdef validCmove(self, x, y):\n\t\t'''This function checks if the computer's move will be valid'''\n\t\n\t#checks if \t'''if it tries to place it higher than the highest piece'''\n\t\tif self.boardCount[x] > y:\n\t\t\treturn False\n\t\t''' if it tries to place below the highest piece'''\n\t\tif self.boardCount[x] < y:\n\t\t\treturn False\n\t\t'''if it tries to place it in a column with 6 pieces already'''\n\t\tif self.boardCount[x] >= 6:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\t\n\n\tdef drawCmove(self, x ,y):\n\t\t'''This function adds the computer's move to the game board and adds it to the board count'''\n\n\t\tpiece = Circle(Point((x)*100+50, 600 - ((y)*100+50)),30)\n\t\tpiece.setFill('yellow')\n\t\tpiece.draw(self.window)\n\t\tself.boardCount[x] += 1\n\t\tself.grid[x][y] = -1\n\t\treturn\n\n\n\tdef computerMove(self):\n\t\t'''This function computes where the computer will put its next move and calls the drawCmove() fxn to do so.\n\t\tThe computer will add its piece to wherever there are three in a row in either color then looks to see when \n\t\tthere are two in a row. 
Move will be placed randomly if no pieces are placed in a row'''\n\n\t#checks if there are three pieces lined up vertically in a row and places its move to win or prevent the win'''\n\t\tfor i in range(7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i][j+1]\n\t\t\t\tself.square3 = self.grid[i][j+2]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i,j+3):\n\t\t\t\t\t\tself.drawCmove(i,j+3)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\t\n\t#checks if there are three pieces lined up diagonally from lower left to upper right and places its move to win or prevent the win\n\t#help from CS major, Joh Farmer\n\t\tfor i in range(4):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j+1]\n\t\t\t\tself.square3 = self.grid[i+2][j+2]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i+3,j+3):\n\t\t\t\t\t\tself.drawCmove(i+3,j+3)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j-1):\n\t\t\t\t\t\tself.drawCmove(i-1,j-1)\n\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\n\t#checks if there are three pieces lined up diagonally from lower right to upper left and places its move to win or prevent the win\t\t\n\t\tfor i in range(3,7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i-1][j+1]\n\t\t\t\tself.square3 = self.grid[i-2][j+2]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i-3,j+3):\n\t\t\t\t\t\tself.drawCmove(i-3,j+3)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i+1,j-1):\n\t\t\t\t\t\tself.drawCmove(i+1,j-1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\n\t#checks if there are three pieces lined up horizontally in a row and places its move to win or prevent the win (either side)'''\n\t\tfor i in range(4):\n\t\t\tfor j in range(6):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j]\n\t\t\t\tself.square3 = self.grid[i+2][j]\n\t\t\t\tif self.square1 == self.square2 and self.square2 == self.square3:\n\t\t\t\t\tif self.validCmove(i+3,j):\n\t\t\t\t\t\tself.drawCmove(i+3,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j):\n\t\t\t\t\t\tself.drawCmove(i-1,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\n\n\n\t#checks if there are two in a row diagonally from lower left to upper right and places its move accordingly\n\t\tfor i in range(4):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j+1]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i+2,j+2):\n\t\t\t\t\t\tself.drawCmove(i+2,j+2)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j-1):\n\t\t\t\t\t\tself.drawCmove(i-1,j-1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\t\n\t#checks if there are two in a row vertically and places its move accordingly\n\t\tfor i in range(7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i][j+1]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i,j+2):\n\t\t\t\t\t\tself.drawCmove(i,j+2)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif 
self.validCmove(i,j-1):\n\t\t\t\t\t\tself.drawCmove(i,j-1)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\t\t\t\t\t\n\t\t\t\t\t\t\n\t#checks if there are two in a row diagonally from lower right to upper left and places its move accordingly\t\n\t\tfor i in range(3,7):\n\t\t\tfor j in range(3):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i-1][j+1]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i-2,j+2):\n\t\t\t\t\t\tself.drawCmove(i-2,j+2)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i+1,j-1):\n\t\t\t\t\t\tself.drawCmove(i+1,j-1)\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\t\t\t\t\t\n\t#checks if there are two in a row horizontally and places its move accordingly\n\t\tfor i in range(4):\n\t\t\tfor j in range(6):\n\t\t\t\tself.square1 = self.grid[i][j]\n\t\t\t\tself.square2 = self.grid[i+1][j]\n\t\t\t\tif self.square1 == self.square2:\n\t\t\t\t\tif self.validCmove(i+2,j):\n\t\t\t\t\t\tself.drawCmove(i+2,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\tif self.validCmove(i-1,j):\n\t\t\t\t\t\tself.drawCmove(i-1,j)\n\t\t\t\t\t\treturn\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.randomMove()\n\t\t\t\t\t\treturn\n\n\t#places move randomly if no pieces are being placed in a row\n\t\telse:\n\t\t\tself.randomMove()\n\n\n\tdef randomMove(self):\n\t\t'''This function creates a random coordinate for its move, checks if it's valid, then prints the move.\n\t\tIt will continue to run until numbers are valid for current board'''\n\t\n\t\trandY = random.randint(0,6)\n\t\trandX = random.randint(0,7)\n\t\t\n\t\tif self.validCmove(randY,randX):\n\t\t\tself.drawCmove(randY,randX)\n\t\t\treturn\n\t\telse:\n\t\t\tself.randomMove()\n\n\ndef main():\n\tgameOver = False\n\tconnect4 = ConnectWindow()\n\twhile gameOver == False:\n\t\tconnect4.window.getMouse()\n\t\tgameOver = connect4.isWon()\n\tinput(\"Hit enter to quit\")\n\n\t\nmain()\n\n\n\n\n\n\n\n", "step-ids": [ 2, 8, 10, 11, 16 ] }
[ 2, 8, 10, 11, 16 ]
import json

import requests
import config


class RequestAnnotation:
    def schedule(self, command: str, **kwargs):
        response = requests.post(url=f"http://localhost:{config.annotation_port}/{command}",
                                 json=kwargs)

        # not 'text' for annotating, but 'text' of response is meant here:
        return json.loads(response.text)
normal
{ "blob_id": "6782761bcbf53ea5076b6dfb7de66d0e68a9f45d", "index": 3123, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass RequestAnnotation:\n <mask token>\n", "step-3": "<mask token>\n\n\nclass RequestAnnotation:\n\n def schedule(self, command: str, **kwargs):\n response = requests.post(url=\n f'http://localhost:{config.annotation_port}/{command}', json=kwargs\n )\n return json.loads(response.text)\n", "step-4": "import json\nimport requests\nimport config\n\n\nclass RequestAnnotation:\n\n def schedule(self, command: str, **kwargs):\n response = requests.post(url=\n f'http://localhost:{config.annotation_port}/{command}', json=kwargs\n )\n return json.loads(response.text)\n", "step-5": "import json\n\nimport requests\nimport config\n\nclass RequestAnnotation:\n def schedule(self,\n command: str,\n **kwargs):\n response = requests.post(url=f\"http://localhost:{config.annotation_port}/{command}\",\n json=kwargs)\n\n # not 'text' for annotating, but 'text' of response is meant here:\n return json.loads(response.text)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#
# Wrappers for model evaluation
#

import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from modules import Classifier
from typing import Generator, NamedTuple, Optional, Union
from utils import expand_generator


class Evaluator(object):
    class Result(NamedTuple):
        accuracy: float
        log_loss: float

    def evaluate(self, *args, **kwargs):
        return NotImplemented


class ModelEvaluator(Evaluator):
    def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool = True):
        self.dataset = dataset
        self.mixed_precision = mixed_precision
        self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)

    @property
    def num_batches(self):
        return len(self.loader)

    def evaluate(self, model: Classifier, device: Optional[Union[torch.device, str]] = None) -> Evaluator.Result:
        return expand_generator(self.evaluate_iter(model, device), return_only=True)

    def evaluate_iter(
            self,
            model: Classifier,
            device: Optional[Union[torch.device, str]] = None) -> Generator[dict, None, Evaluator.Result]:
        with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled=self.mixed_precision):
            mean_accuracy = 0.
            mean_log_loss = 0.
            for i, (x, y) in enumerate(self.loader):
                x = x.to(device)
                y = y.to(device)
                logits = model(x)
                correct = torch.sum(logits.argmax(-1) == y).item()
                log_loss = F.cross_entropy(logits, y, reduction='sum').item()
                mean_accuracy += correct / len(self.dataset)
                mean_log_loss += log_loss / len(self.dataset)
                yield dict(batch=i)
            return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)
normal
{ "blob_id": "493dbf85069f2115896a5f5f5d593c8d95b85cff", "index": 4594, "step-1": "<mask token>\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n", "step-2": "<mask token>\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n <mask token>\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n", "step-3": "<mask token>\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def 
evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n", "step-4": "import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom modules import Classifier\nfrom typing import Generator, NamedTuple, Optional, Union\nfrom utils import expand_generator\n\n\nclass Evaluator(object):\n\n\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int,\n mixed_precision: bool=True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False,\n num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.\n device, str]]=None) ->Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device),\n return_only=True)\n\n def evaluate_iter(self, model: Classifier, device: Optional[Union[torch\n .device, str]]=None) ->Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), torch.cuda.amp.autocast(enabled\n =self.mixed_precision):\n mean_accuracy = 0.0\n mean_log_loss = 0.0\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n", "step-5": "#\n# Wrappers for model evaluation\n#\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom modules import Classifier\nfrom typing import Generator, NamedTuple, Optional, Union\nfrom utils import expand_generator\n\n\nclass Evaluator(object):\n class Result(NamedTuple):\n accuracy: float\n log_loss: float\n\n def evaluate(self, *args, **kwargs):\n return NotImplemented\n\n\nclass ModelEvaluator(Evaluator):\n def __init__(self, dataset: Dataset, batch_size: int, num_workers: int, mixed_precision: bool = True):\n self.dataset = dataset\n self.mixed_precision = mixed_precision\n self.loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=num_workers, drop_last=False)\n\n @property\n def num_batches(self):\n return len(self.loader)\n\n def evaluate(self, model: Classifier, device: Optional[Union[torch.device, str]] = None) -> Evaluator.Result:\n return expand_generator(self.evaluate_iter(model, device), return_only=True)\n\n def evaluate_iter(\n self,\n model: Classifier,\n device: Optional[Union[torch.device, str]] = None) -> Generator[dict, None, Evaluator.Result]:\n with model.as_eval(), torch.no_grad(), 
torch.cuda.amp.autocast(enabled=self.mixed_precision):\n mean_accuracy = 0.\n mean_log_loss = 0.\n for i, (x, y) in enumerate(self.loader):\n x = x.to(device)\n y = y.to(device)\n logits = model(x)\n correct = torch.sum(logits.argmax(-1) == y).item()\n log_loss = F.cross_entropy(logits, y, reduction='sum').item()\n mean_accuracy += correct / len(self.dataset)\n mean_log_loss += log_loss / len(self.dataset)\n yield dict(batch=i)\n return self.Result(accuracy=mean_accuracy, log_loss=mean_log_loss)\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
import csv
import os

# Ask before overwriting, and check for the existing output file *before*
# opening it for writing: opening with "w" creates/truncates the file first,
# which would make an existence check afterwards meaningless.
overwrite = True
if os.path.isfile('sample1.csv'):
    q = input("File already exists. Do you want to overwrite?")
    overwrite = q.lower() == 'yes'

if overwrite:
    with open("sample.csv") as rf:
        csv_reader = csv.DictReader(rf)
        with open("sample1.csv", "w", newline='') as wf:
            csv_headers = ['fname', 'lname', 'email']
            csv_writer = csv.DictWriter(wf, fieldnames=csv_headers, delimiter=',')
            csv_writer.writeheader()
            for row in csv_reader:
                csv_writer.writerow(row)
else:
    print("Please try with a different file name")
normal
{ "blob_id": "43196258b61801799b8d6b7d23f5816d84cb5dff", "index": 7294, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('sample.csv') as rf:\n csv_reader = csv.DictReader(rf)\n with open('sample1.csv', 'w') as wf:\n csv_headers = ['fname', 'lname', 'email']\n if os.path.isfile('sample1.csv'):\n q = input('File already exists. Do you want to overwrite?')\n if q.lower() == 'yes':\n csv_writer = csv.DictWriter(wf, fieldnames=csv_headers,\n delimiter=',')\n csv_writer.writeheader()\n for l in csv_reader:\n csv_writer.writerow(l)\n else:\n print('Please try with a different file name')\n", "step-3": "import csv\nimport os\nwith open('sample.csv') as rf:\n csv_reader = csv.DictReader(rf)\n with open('sample1.csv', 'w') as wf:\n csv_headers = ['fname', 'lname', 'email']\n if os.path.isfile('sample1.csv'):\n q = input('File already exists. Do you want to overwrite?')\n if q.lower() == 'yes':\n csv_writer = csv.DictWriter(wf, fieldnames=csv_headers,\n delimiter=',')\n csv_writer.writeheader()\n for l in csv_reader:\n csv_writer.writerow(l)\n else:\n print('Please try with a different file name')\n", "step-4": "import csv\r\nimport os\r\nwith open(\"sample.csv\") as rf:\r\n csv_reader=csv.DictReader(rf)\r\n with open(\"sample1.csv\",\"w\") as wf:\r\n csv_headers=['fname','lname','email']\r\n if os.path.isfile('sample1.csv'):\r\n q=input(\"File already exists. Do you want to overwrite?\")\r\n if q.lower()=='yes':\r\n csv_writer=csv.DictWriter(wf,fieldnames=csv_headers,delimiter=',')\r\n csv_writer.writeheader()\r\n for l in csv_reader:\r\n \r\n csv_writer.writerow(l)\r\n else:\r\n print(\"Please try with a different file name\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from datetime import datetime
from random import seed

from pandas import date_range, DataFrame
import matplotlib.pyplot as plt
from matplotlib import style
from numpy import asarray

import strategy_learner as sl
from util import get_data

style.use('ggplot')
seed(0)


def run_algo(sym, investment, start_date, end_date, bench_sym):
    # instantiate the strategy learner
    learner = sl.StrategyLearner(bench_sym=bench_sym, verbose=verbose)

    # train the learner
    learner.add_evidence(symbol=sym, start_date=start_date, end_date=end_date, investment=investment)

    # get some data for reference
    syms = [sym]
    dates = date_range(start_date, end_date)
    prices_all = get_data(symbols=syms, dates=dates, bench_sym=bench_sym)
    prices = prices_all[syms]

    # test the learner
    df_trades = learner.test_policy(symbol=sym, start_date=start_date, end_date=end_date, investment=investment)

    return df_trades


def evaluate(sym, orders, start_val, fee, slippage, bench_sym):
    # Read orders file
    orders_df = orders
    orders_df.sort_index(inplace=True)
    start_date = orders_df.index[0]
    end_date = orders_df.index[-1]

    # Collect price data for each ticker in order
    df_prices = get_data(symbols=[sym], dates=date_range(start_date, end_date), bench_sym=bench_sym)
    df_prices = df_prices.drop(bench_sym, 1)
    df_prices["cash"] = 1

    # Track trade data
    df_trades = df_prices.copy()
    df_trades[:] = 0

    # Populate trade dataframe
    for i, date in enumerate(orders_df.index):
        # Get order information
        if orders_df.Order[i] == "BUY":
            order = 1
        else:
            order = -1

        # Start with 1/2 position at first
        if i == 0:
            shares = 100
        else:
            shares = 200

        # Calculate change in shares and cash
        df_trades[sym][date] += order * shares
        df_trades['cash'][date] -= order * (1 - slippage) * shares * df_prices[sym][date] - fee

    # Track total holdings
    df_holdings = df_prices.copy()
    df_holdings[:] = 0

    # Include starting value
    df_holdings['cash'][0] = start_val

    # Update first day of holdings
    for c in df_trades.columns:
        df_holdings[c][0] += df_trades[c][0]

    # Update every day, adding new day's trade information with previous day's holdings
    for i in range(1, len(df_trades.index)):
        for c in df_trades.columns:
            df_holdings[c][i] += df_trades[c][i] + df_holdings[c][i - 1]

    # Track monetary values
    df_values = df_prices.mul(df_holdings)

    # Define port_val
    port_val = df_values.sum(axis=1)

    return port_val


if __name__ == "__main__":
    symbol = "NASDAQ1001440"
    bench_sym = "S&P5001440"
    verbose = False
    investment = 100000  # 100k = 100 contracts
    fee = 0
    slippage = 0.0025  # in %
    start_date_insample = datetime(2013, 5, 1)
    end_date_insample = datetime(2015, 5, 1)
    start_date_outsample = datetime(2015, 5, 2)
    end_date_outsample = datetime(2017, 12, 7)

    # Train
    df_trades_in, benchmark_in = run_algo(sym=symbol, investment=investment, start_date=start_date_insample, end_date=end_date_insample, bench_sym=bench_sym)
    df_trades_out, benchmark_out = run_algo(sym=symbol, investment=investment, start_date=start_date_outsample, end_date=end_date_outsample, bench_sym=bench_sym)

    # Evaluate
    insample = evaluate(sym=symbol, orders=df_trades_in, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    insample = DataFrame(insample)
    bench_insample = evaluate(sym=symbol, orders=benchmark_in, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    bench_insample = DataFrame(bench_insample)
    outsample = evaluate(sym=symbol, orders=df_trades_out, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    outsample = DataFrame(outsample)
    bench_outsample = evaluate(sym=symbol, orders=benchmark_out, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)
    bench_outsample = DataFrame(bench_outsample)

    # Cumulative returns
    port_ret_in = float(asarray(insample.values)[-1])
    port_ret_out = float(asarray(outsample.values)[-1])
    bench_ret_in = float(asarray(bench_insample.values)[-1])
    bench_ret_out = float(asarray(bench_outsample.values)[-1])

    # Print results
    print()
    print("Cumulative return in-sample:\t\t${:,.2f}\t\t(+{:.2f} %)".format(port_ret_in - investment, 100 * (port_ret_in - investment) / investment))
    print("Benchmark return in-sample:\t\t\t${:,.2f}\t\t(+{:.2f} %)".format(bench_ret_in - investment, 100 * (bench_ret_in - investment) / investment))
    print("Cumulative return out-of-sample:\t${:,.2f}\t\t(+{:.2f} %)".format(port_ret_out - investment, 100 * (port_ret_out - investment) / investment))
    print("Benchmark return out-of-sample:\t\t${:,.2f}\t\t(+{:.2f} %)".format(bench_ret_out - investment, 100 * (bench_ret_out - investment) / investment))

    # Plot charts
    plt.subplot(1, 2, 1)
    plt.plot(insample.index, insample, c="mediumseagreen", lw=3)
    plt.plot(bench_insample.index, bench_insample, c="skyblue")
    plt.legend(["Strategy", "Buy and Hold"])
    plt.title("In-sample")
    plt.xlabel("Date")
    plt.ylabel("Value")

    plt.subplot(1, 2, 2)
    plt.plot(outsample.index, outsample, c="mediumseagreen", lw=3)
    plt.plot(bench_outsample.index, bench_outsample, c="skyblue")
    plt.legend(["Strategy", "Buy and Hold"])
    plt.title("Out-of-sample")
    plt.xlabel("Date")
    plt.ylabel("Value")
    plt.show()
normal
{ "blob_id": "c0f9a1c39ff5d7cc99a16cf00cddcc14705937ba", "index": 3917, "step-1": "<mask token>\n\n\ndef run_algo(sym, investment, start_date, end_date, bench_sym):\n learner = sl.StrategyLearner(bench_sym=bench_sym, verbose=verbose)\n learner.add_evidence(symbol=sym, start_date=start_date, end_date=\n end_date, investment=investment)\n syms = [sym]\n dates = date_range(start_date, end_date)\n prices_all = get_data(symbols=syms, dates=dates, bench_sym=bench_sym)\n prices = prices_all[syms]\n df_trades = learner.test_policy(symbol=sym, start_date=start_date,\n end_date=end_date, investment=investment)\n return df_trades\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef run_algo(sym, investment, start_date, end_date, bench_sym):\n learner = sl.StrategyLearner(bench_sym=bench_sym, verbose=verbose)\n learner.add_evidence(symbol=sym, start_date=start_date, end_date=\n end_date, investment=investment)\n syms = [sym]\n dates = date_range(start_date, end_date)\n prices_all = get_data(symbols=syms, dates=dates, bench_sym=bench_sym)\n prices = prices_all[syms]\n df_trades = learner.test_policy(symbol=sym, start_date=start_date,\n end_date=end_date, investment=investment)\n return df_trades\n\n\ndef evaluate(sym, orders, start_val, fee, slippage, bench_sym):\n orders_df = orders\n orders_df.sort_index(inplace=True)\n start_date = orders_df.index[0]\n end_date = orders_df.index[-1]\n df_prices = get_data(symbols=[sym], dates=date_range(start_date,\n end_date), bench_sym=bench_sym)\n df_prices = df_prices.drop(bench_sym, 1)\n df_prices['cash'] = 1\n df_trades = df_prices.copy()\n df_trades[:] = 0\n for i, date in enumerate(orders_df.index):\n if orders_df.Order[i] == 'BUY':\n order = 1\n else:\n order = -1\n if i == 0:\n shares = 100\n else:\n shares = 200\n df_trades[sym][date] += order * shares\n df_trades['cash'][date] -= order * (1 - slippage) * shares * df_prices[\n sym][date] - fee\n df_holdings = df_prices.copy()\n df_holdings[:] = 0\n df_holdings['cash'][0] = start_val\n for c in df_trades.columns:\n df_holdings[c][0] += df_trades[c][0]\n for i in range(1, len(df_trades.index)):\n for c in df_trades.columns:\n df_holdings[c][i] += df_trades[c][i] + df_holdings[c][i - 1]\n df_values = df_prices.mul(df_holdings)\n port_val = df_values.sum(axis=1)\n return port_val\n\n\n<mask token>\n", "step-3": "<mask token>\nstyle.use('ggplot')\nseed(0)\n\n\ndef run_algo(sym, investment, start_date, end_date, bench_sym):\n learner = sl.StrategyLearner(bench_sym=bench_sym, verbose=verbose)\n learner.add_evidence(symbol=sym, start_date=start_date, end_date=\n end_date, investment=investment)\n syms = [sym]\n dates = date_range(start_date, end_date)\n prices_all = get_data(symbols=syms, dates=dates, bench_sym=bench_sym)\n prices = prices_all[syms]\n df_trades = learner.test_policy(symbol=sym, start_date=start_date,\n end_date=end_date, investment=investment)\n return df_trades\n\n\ndef evaluate(sym, orders, start_val, fee, slippage, bench_sym):\n orders_df = orders\n orders_df.sort_index(inplace=True)\n start_date = orders_df.index[0]\n end_date = orders_df.index[-1]\n df_prices = get_data(symbols=[sym], dates=date_range(start_date,\n end_date), bench_sym=bench_sym)\n df_prices = df_prices.drop(bench_sym, 1)\n df_prices['cash'] = 1\n df_trades = df_prices.copy()\n df_trades[:] = 0\n for i, date in enumerate(orders_df.index):\n if orders_df.Order[i] == 'BUY':\n order = 1\n else:\n order = -1\n if i == 0:\n shares = 100\n else:\n shares = 200\n df_trades[sym][date] += order * shares\n df_trades['cash'][date] -= 
order * (1 - slippage) * shares * df_prices[\n sym][date] - fee\n df_holdings = df_prices.copy()\n df_holdings[:] = 0\n df_holdings['cash'][0] = start_val\n for c in df_trades.columns:\n df_holdings[c][0] += df_trades[c][0]\n for i in range(1, len(df_trades.index)):\n for c in df_trades.columns:\n df_holdings[c][i] += df_trades[c][i] + df_holdings[c][i - 1]\n df_values = df_prices.mul(df_holdings)\n port_val = df_values.sum(axis=1)\n return port_val\n\n\nif __name__ == '__main__':\n symbol = 'NASDAQ1001440'\n bench_sym = 'S&P5001440'\n verbose = False\n investment = 100000\n fee = 0\n slippage = 0.0025\n start_date_insample = datetime(2013, 5, 1)\n end_date_insample = datetime(2015, 5, 1)\n start_date_outsample = datetime(2015, 5, 2)\n end_date_outsample = datetime(2017, 12, 7)\n df_trades_in, benchmark_in = run_algo(sym=symbol, investment=investment,\n start_date=start_date_insample, end_date=end_date_insample,\n bench_sym=bench_sym)\n df_trades_out, benchmark_out = run_algo(sym=symbol, investment=\n investment, start_date=start_date_outsample, end_date=\n end_date_outsample, bench_sym=bench_sym)\n insample = evaluate(sym=symbol, orders=df_trades_in, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n insample = DataFrame(insample)\n bench_insample = evaluate(sym=symbol, orders=benchmark_in, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n bench_insample = DataFrame(bench_insample)\n outsample = evaluate(sym=symbol, orders=df_trades_out, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n outsample = DataFrame(outsample)\n bench_outsample = evaluate(sym=symbol, orders=benchmark_out, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n bench_outsample = DataFrame(bench_outsample)\n port_ret_in = float(asarray(insample.values)[-1])\n port_ret_out = float(asarray(outsample.values)[-1])\n bench_ret_in = float(asarray(bench_insample.values)[-1])\n bench_ret_out = float(asarray(bench_outsample.values)[-1])\n print()\n print('Cumulative return in-sample:\\t\\t${:,.2f}\\t\\t(+{:.2f} %)'.format(\n port_ret_in - investment, 100 * (port_ret_in - investment) /\n investment))\n print('Benchmark return in-sample:\\t\\t\\t${:,.2f}\\t\\t(+{:.2f} %)'.format\n (bench_ret_in - investment, 100 * (bench_ret_in - investment) /\n investment))\n print('Cumulative return out-of-sample:\\t${:,.2f}\\t\\t(+{:.2f} %)'.\n format(port_ret_out - investment, 100 * (port_ret_out - investment) /\n investment))\n print('Benchmark return out-of-sample:\\t\\t${:,.2f}\\t\\t(+{:.2f} %)'.\n format(bench_ret_out - investment, 100 * (bench_ret_out -\n investment) / investment))\n plt.subplot(1, 2, 1)\n plt.plot(insample.index, insample, c='mediumseagreen', lw=3)\n plt.plot(bench_insample.index, bench_insample, c='skyblue')\n plt.legend(['Strategy', 'Buy and Hold'])\n plt.title('In-sample')\n plt.xlabel('Date')\n plt.ylabel('Value')\n plt.subplot(1, 2, 2)\n plt.plot(outsample.index, outsample, c='mediumseagreen', lw=3)\n plt.plot(bench_outsample.index, bench_outsample, c='skyblue')\n plt.legend(['Strategy', 'Buy and Hold'])\n plt.title('Out-of-sample')\n plt.xlabel('Date')\n plt.ylabel('Value')\n plt.show()\n", "step-4": "from datetime import datetime\nfrom random import seed\nfrom pandas import date_range, DataFrame\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nfrom numpy import asarray\nimport strategy_learner as sl\nfrom util import get_data\nstyle.use('ggplot')\nseed(0)\n\n\ndef run_algo(sym, investment, 
start_date, end_date, bench_sym):\n learner = sl.StrategyLearner(bench_sym=bench_sym, verbose=verbose)\n learner.add_evidence(symbol=sym, start_date=start_date, end_date=\n end_date, investment=investment)\n syms = [sym]\n dates = date_range(start_date, end_date)\n prices_all = get_data(symbols=syms, dates=dates, bench_sym=bench_sym)\n prices = prices_all[syms]\n df_trades = learner.test_policy(symbol=sym, start_date=start_date,\n end_date=end_date, investment=investment)\n return df_trades\n\n\ndef evaluate(sym, orders, start_val, fee, slippage, bench_sym):\n orders_df = orders\n orders_df.sort_index(inplace=True)\n start_date = orders_df.index[0]\n end_date = orders_df.index[-1]\n df_prices = get_data(symbols=[sym], dates=date_range(start_date,\n end_date), bench_sym=bench_sym)\n df_prices = df_prices.drop(bench_sym, 1)\n df_prices['cash'] = 1\n df_trades = df_prices.copy()\n df_trades[:] = 0\n for i, date in enumerate(orders_df.index):\n if orders_df.Order[i] == 'BUY':\n order = 1\n else:\n order = -1\n if i == 0:\n shares = 100\n else:\n shares = 200\n df_trades[sym][date] += order * shares\n df_trades['cash'][date] -= order * (1 - slippage) * shares * df_prices[\n sym][date] - fee\n df_holdings = df_prices.copy()\n df_holdings[:] = 0\n df_holdings['cash'][0] = start_val\n for c in df_trades.columns:\n df_holdings[c][0] += df_trades[c][0]\n for i in range(1, len(df_trades.index)):\n for c in df_trades.columns:\n df_holdings[c][i] += df_trades[c][i] + df_holdings[c][i - 1]\n df_values = df_prices.mul(df_holdings)\n port_val = df_values.sum(axis=1)\n return port_val\n\n\nif __name__ == '__main__':\n symbol = 'NASDAQ1001440'\n bench_sym = 'S&P5001440'\n verbose = False\n investment = 100000\n fee = 0\n slippage = 0.0025\n start_date_insample = datetime(2013, 5, 1)\n end_date_insample = datetime(2015, 5, 1)\n start_date_outsample = datetime(2015, 5, 2)\n end_date_outsample = datetime(2017, 12, 7)\n df_trades_in, benchmark_in = run_algo(sym=symbol, investment=investment,\n start_date=start_date_insample, end_date=end_date_insample,\n bench_sym=bench_sym)\n df_trades_out, benchmark_out = run_algo(sym=symbol, investment=\n investment, start_date=start_date_outsample, end_date=\n end_date_outsample, bench_sym=bench_sym)\n insample = evaluate(sym=symbol, orders=df_trades_in, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n insample = DataFrame(insample)\n bench_insample = evaluate(sym=symbol, orders=benchmark_in, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n bench_insample = DataFrame(bench_insample)\n outsample = evaluate(sym=symbol, orders=df_trades_out, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n outsample = DataFrame(outsample)\n bench_outsample = evaluate(sym=symbol, orders=benchmark_out, start_val=\n investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n bench_outsample = DataFrame(bench_outsample)\n port_ret_in = float(asarray(insample.values)[-1])\n port_ret_out = float(asarray(outsample.values)[-1])\n bench_ret_in = float(asarray(bench_insample.values)[-1])\n bench_ret_out = float(asarray(bench_outsample.values)[-1])\n print()\n print('Cumulative return in-sample:\\t\\t${:,.2f}\\t\\t(+{:.2f} %)'.format(\n port_ret_in - investment, 100 * (port_ret_in - investment) /\n investment))\n print('Benchmark return in-sample:\\t\\t\\t${:,.2f}\\t\\t(+{:.2f} %)'.format\n (bench_ret_in - investment, 100 * (bench_ret_in - investment) /\n investment))\n print('Cumulative return 
out-of-sample:\\t${:,.2f}\\t\\t(+{:.2f} %)'.\n format(port_ret_out - investment, 100 * (port_ret_out - investment) /\n investment))\n print('Benchmark return out-of-sample:\\t\\t${:,.2f}\\t\\t(+{:.2f} %)'.\n format(bench_ret_out - investment, 100 * (bench_ret_out -\n investment) / investment))\n plt.subplot(1, 2, 1)\n plt.plot(insample.index, insample, c='mediumseagreen', lw=3)\n plt.plot(bench_insample.index, bench_insample, c='skyblue')\n plt.legend(['Strategy', 'Buy and Hold'])\n plt.title('In-sample')\n plt.xlabel('Date')\n plt.ylabel('Value')\n plt.subplot(1, 2, 2)\n plt.plot(outsample.index, outsample, c='mediumseagreen', lw=3)\n plt.plot(bench_outsample.index, bench_outsample, c='skyblue')\n plt.legend(['Strategy', 'Buy and Hold'])\n plt.title('Out-of-sample')\n plt.xlabel('Date')\n plt.ylabel('Value')\n plt.show()\n", "step-5": "from datetime import datetime\nfrom random import seed\n\nfrom pandas import date_range, DataFrame\nimport matplotlib.pyplot as plt\nfrom matplotlib import style\nfrom numpy import asarray\n\nimport strategy_learner as sl\nfrom util import get_data\n\nstyle.use('ggplot')\nseed(0)\n\ndef run_algo(sym, investment, start_date, end_date, bench_sym):\n # instantiate the strategy learner\n learner = sl.StrategyLearner(bench_sym=bench_sym, verbose=verbose)\n\n # train the learner\n learner.add_evidence(symbol=sym, start_date=start_date, end_date=end_date, investment=investment)\n\n # get some data for reference\n syms = [sym]\n dates = date_range(start_date, end_date)\n prices_all = get_data(symbols=syms, dates=dates, bench_sym=bench_sym)\n prices = prices_all[syms]\n\n # test the learner\n df_trades = learner.test_policy(symbol=sym, start_date=start_date, end_date=end_date, investment=investment)\n\n return df_trades\n\n\ndef evaluate(sym, orders, start_val, fee, slippage, bench_sym):\n # Read orders file\n orders_df = orders\n\n orders_df.sort_index(inplace=True)\n start_date = orders_df.index[0]\n end_date = orders_df.index[-1]\n\n # Collect price data for each ticker in order\n df_prices = get_data(symbols=[sym], dates=date_range(start_date, end_date), bench_sym=bench_sym)\n df_prices = df_prices.drop(bench_sym, 1)\n df_prices[\"cash\"] = 1\n\n # Track trade data\n df_trades = df_prices.copy()\n df_trades[:] = 0\n\n # Populate trade dataframe\n for i, date in enumerate(orders_df.index):\n # Get order information\n if orders_df.Order[i] == \"BUY\":\n order = 1\n else:\n order = -1\n\n # Start with 1/2 position at first\n if i == 0:\n shares = 100\n else:\n shares = 200\n\n # Calculate change in shares and cash\n df_trades[sym][date] += order * shares\n df_trades['cash'][date] -= order * (1 - slippage) * shares * df_prices[sym][date] - fee\n\n # Track total holdings\n df_holdings = df_prices.copy()\n df_holdings[:] = 0\n\n # Include starting value\n df_holdings['cash'][0] = start_val\n\n # Update first day of holdings\n for c in df_trades.columns:\n df_holdings[c][0] += df_trades[c][0]\n\n # Update every day, adding new day's trade information with previous day's holdings\n for i in range(1, len(df_trades.index)):\n for c in df_trades.columns:\n df_holdings[c][i] += df_trades[c][i] + df_holdings[c][i - 1]\n\n # Track monetary values\n df_values = df_prices.mul(df_holdings)\n\n # Define port_val\n port_val = df_values.sum(axis=1)\n\n return port_val\n\n\nif __name__ == \"__main__\":\n symbol = \"NASDAQ1001440\"\n bench_sym = \"S&P5001440\"\n verbose = False\n investment = 100000 # 100k = 100 contracts\n fee = 0\n slippage = 0.0025 # in %\n start_date_insample = 
datetime(2013, 5, 1)\n end_date_insample = datetime(2015, 5, 1)\n start_date_outsample = datetime(2015, 5, 2)\n end_date_outsample = datetime(2017, 12, 7)\n\n # Train\n df_trades_in, benchmark_in = run_algo(sym=symbol, investment=investment, start_date=start_date_insample, end_date=end_date_insample, bench_sym=bench_sym)\n df_trades_out, benchmark_out = run_algo(sym=symbol, investment=investment, start_date=start_date_outsample, end_date=end_date_outsample, bench_sym=bench_sym)\n\n # Evaluate\n insample = evaluate(sym=symbol, orders=df_trades_in, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n insample = DataFrame(insample)\n bench_insample = evaluate(sym=symbol, orders=benchmark_in, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n bench_insample = DataFrame(bench_insample)\n outsample = evaluate(sym=symbol, orders=df_trades_out, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n outsample = DataFrame(outsample)\n bench_outsample = evaluate(sym=symbol, orders=benchmark_out, start_val=investment, fee=fee, slippage=slippage, bench_sym=bench_sym)\n bench_outsample = DataFrame(bench_outsample)\n\n # Cumulative returns\n port_ret_in = float(asarray(insample.values)[-1])\n port_ret_out = float(asarray(outsample.values)[-1])\n bench_ret_in = float(asarray(bench_insample.values)[-1])\n bench_ret_out = float(asarray(bench_outsample.values)[-1])\n\n # Print results\n print()\n print(\"Cumulative return in-sample:\\t\\t${:,.2f}\\t\\t(+{:.2f} %)\".format(port_ret_in - investment, 100 * (port_ret_in - investment) / investment))\n print(\"Benchmark return in-sample:\\t\\t\\t${:,.2f}\\t\\t(+{:.2f} %)\".format(bench_ret_in - investment, 100 * (bench_ret_in - investment) / investment))\n print(\"Cumulative return out-of-sample:\\t${:,.2f}\\t\\t(+{:.2f} %)\".format(port_ret_out - investment, 100 * (port_ret_out - investment) / investment))\n print(\"Benchmark return out-of-sample:\\t\\t${:,.2f}\\t\\t(+{:.2f} %)\".format(bench_ret_out - investment, 100 * (bench_ret_out - investment) / investment))\n\n # Plot charts\n plt.subplot(1, 2, 1)\n plt.plot(insample.index, insample, c=\"mediumseagreen\", lw=3)\n plt.plot(bench_insample.index, bench_insample, c=\"skyblue\")\n plt.legend([\"Strategy\", \"Buy and Hold\"])\n plt.title(\"In-sample\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Value\")\n\n plt.subplot(1, 2, 2)\n plt.plot(outsample.index, outsample, c=\"mediumseagreen\", lw=3)\n plt.plot(bench_outsample.index, bench_outsample, c=\"skyblue\")\n plt.legend([\"Strategy\", \"Buy and Hold\"])\n plt.title(\"Out-of-sample\")\n plt.xlabel(\"Date\")\n plt.ylabel(\"Value\")\n plt.show()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from typing import List


class Solution:
    def evalRPN(self, tokens: List[str]) -> int:
        def operation(op1, op2, op):
            if op == "+":
                return op1 + op2
            if op == "-":
                return op1 - op2
            if op == "*":
                return op1 * op2
            if op == "/":
                return int(op1 / op2)

        stack = []
        for char in tokens:
            if char in ["+", "-", "*", "/"]:
                op2 = stack.pop()
                op1 = stack.pop()
                res = operation(op1, op2, char)
                stack.append(int(res))
            else:
                stack.append(int(char))
        return stack.pop()
normal
{ "blob_id": "6b597f1570c022d17e4476e2ab8817e724a166a7", "index": 1096, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n", "step-3": "class Solution:\n\n def evalRPN(self, tokens: List[str]) ->int:\n\n def operation(op1, op2, op):\n if op == '+':\n return op1 + op2\n if op == '-':\n return op1 - op2\n if op == '*':\n return op1 * op2\n if op == '/':\n return int(op1 / op2)\n stack = []\n for char in tokens:\n if char in ['+', '-', '*', '/']:\n op2 = stack.pop()\n op1 = stack.pop()\n res = operation(op1, op2, char)\n stack.append(int(res))\n else:\n stack.append(int(char))\n return stack.pop()\n", "step-4": "class Solution:\r\n def evalRPN(self, tokens: List[str]) -> int:\r\n def operation(op1,op2,op):\r\n if op == \"+\":\r\n return op1 + op2\r\n if op == \"-\":\r\n return op1 - op2\r\n if op == \"*\":\r\n return op1 * op2\r\n if op == \"/\":\r\n return int(op1/op2)\r\n \r\n stack = []\r\n for char in tokens:\r\n if char in [\"+\", \"-\", \"*\", \"/\"]:\r\n op2 = stack.pop()\r\n op1 = stack.pop()\r\n res = operation(op1,op2,char)\r\n stack.append(int(res))\r\n else:\r\n stack.append(int(char))\r\n return stack.pop()", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from django.db import models


class crontab(models.Model):
    name = models.CharField(max_length=20)


class converter(models.Model):
    name = models.CharField(max_length=20)


class MainTable(models.Model):
    rank = models.IntegerField(null=True)
    coinid = models.CharField(max_length=30, null=True)
    symbol = models.CharField(max_length=10)
    name = models.CharField(max_length=30)
    thumbimg = models.CharField(max_length=30)
    marketcap = models.FloatField(null=True)
    totalvolume = models.FloatField(null=True)
    price_change = models.FloatField(null=True)
    pricechangepercentage = models.FloatField(null=True)
    onehourchange = models.FloatField(null=True)
    sevendaychange = models.FloatField(null=True)
    circulating_supply = models.FloatField(null=True)


class Table(models.Model):
    name = models.CharField(max_length=30)
    coinid = models.CharField(max_length=30)
    symbol = models.CharField(max_length=20)
    img = models.CharField(max_length=50)
    image = models.CharField(max_length=50)


class Price(models.Model):
    price = models.FloatField(null=True)


class Marketdata(models.Model):
    price_change_24h = models.FloatField(null=True)
    price_change_percentage_24h = models.FloatField(null=True)
normal
{ "blob_id": "0054921928838d9aee63cf58f50a0a01ee12635d", "index": 6049, "step-1": "<mask token>\n\n\nclass Table(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n", "step-2": "<mask token>\n\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n", "step-3": "<mask token>\n\n\nclass converter(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass MainTable(models.Model):\n rank = models.IntegerField(null=True)\n coinid = models.CharField(max_length=30, null=True)\n symbol = models.CharField(max_length=10)\n name = models.CharField(max_length=30)\n thumbimg = models.CharField(max_length=30)\n marketcap = models.FloatField(null=True)\n totalvolume = models.FloatField(null=True)\n price_change = models.FloatField(null=True)\n pricechangepercentage = models.FloatField(null=True)\n onehourchange = models.FloatField(null=True)\n sevendaychange = models.FloatField(null=True)\n circulating_supply = models.FloatField(null=True)\n\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n", "step-4": "<mask token>\n\n\nclass crontab(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass converter(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass MainTable(models.Model):\n rank = models.IntegerField(null=True)\n coinid = models.CharField(max_length=30, null=True)\n symbol = models.CharField(max_length=10)\n name = models.CharField(max_length=30)\n thumbimg = models.CharField(max_length=30)\n marketcap = models.FloatField(null=True)\n totalvolume = models.FloatField(null=True)\n price_change = models.FloatField(null=True)\n pricechangepercentage = models.FloatField(null=True)\n onehourchange = models.FloatField(null=True)\n sevendaychange = models.FloatField(null=True)\n circulating_supply = models.FloatField(null=True)\n\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)\n", "step-5": "from django.db import models\n\nclass crontab(models.Model):\n name = models.CharField(max_length=20)\n\n\nclass converter(models.Model):\n name = models.CharField(max_length=20)\n\nclass MainTable(models.Model):\n rank = 
models.IntegerField(null=True)\n coinid = models.CharField(max_length=30,null=True)\n symbol = models.CharField(max_length=10)\n name = models.CharField(max_length=30)\n thumbimg = models.CharField(max_length=30)\n marketcap = models.FloatField(null=True)\n totalvolume = models.FloatField(null=True)\n price_change = models.FloatField(null=True)\n pricechangepercentage = models.FloatField(null=True)\n onehourchange = models.FloatField(null=True)\n sevendaychange = models.FloatField(null=True)\n circulating_supply = models.FloatField(null=True)\n\nclass Table(models.Model):\n name = models.CharField(max_length=30)\n coinid = models.CharField(max_length=30)\n symbol = models.CharField(max_length=20)\n img = models.CharField(max_length=50)\n image = models.CharField(max_length=50)\n\nclass Price(models.Model):\n price = models.FloatField(null=True)\n\nclass Marketdata(models.Model):\n price_change_24h = models.FloatField(null=True)\n price_change_percentage_24h = models.FloatField(null=True)", "step-ids": [ 5, 6, 10, 12, 14 ] }
[ 5, 6, 10, 12, 14 ]
#TODO: allow workers to pull this from cache

RABBITMQ_IP = '172.23.105.82'
OBJECT_CACHE_IP = "172.23.105.69"
OBJECT_CACHE_PORT = "11911"
SERIESLY_IP = ''
COUCHBASE_IP = '172.23.105.54'
COUCHBASE_PORT = '8091'
COUCHBASE_USER = "Administrator"
COUCHBASE_PWD = "password"
SSH_USER = "root"
SSH_PASSWORD = "password"
WORKERS = ['127.0.0.1']
WORKER_CONFIGS = ["all"]
CB_CLUSTER_TAG = "default"

CLUSTER_IPS = ["172.23.105.54", "172.23.105.57", "172.23.105.62", "172.23.105.55"]

# xdcr config
"""
" pointer information to remote sites
" remote1 = name for remote site
" RABBITMQ_IP = broker managing remote site (can be same as local broker if using different vhosts)
"   this should equal RABBITMQ_IP of remote site
" CB_CLUSTER_TAG = represents vhost watched by workers remote site.
"   this should equal CB_CLUSTER_TAG of remote site
" COUCHBASE_IP/PORT = IP/PORT of a couchbase node in remote site
"""
REMOTE_SITES = {"remote1": {"RABBITMQ_IP": "172.23.105.99",
                            "CB_CLUSTER_TAG": "default",
                            "COUCHBASE_IP": "172.23.105.58",
                            "COUCHBASE_PORT": "8091"}}

LOGDIR = "logs"  # relative to current dir

#Backup Config
ENABLE_BACKUPS = False
BACKUP_DIR = "/tmp/backup"
BACKUP_NODE_IP = "127.0.0.1"
BACKUP_NODE_SSH_USER = "root"
BACKUP_NODE_SSH_PWD = "password"
normal
{ "blob_id": "e70ebd9bb9cd7027772ec117cb91349afba7ab10", "index": 6390, "step-1": "<mask token>\n", "step-2": "RABBITMQ_IP = '172.23.105.82'\nOBJECT_CACHE_IP = '172.23.105.69'\nOBJECT_CACHE_PORT = '11911'\nSERIESLY_IP = ''\nCOUCHBASE_IP = '172.23.105.54'\nCOUCHBASE_PORT = '8091'\nCOUCHBASE_USER = 'Administrator'\nCOUCHBASE_PWD = 'password'\nSSH_USER = 'root'\nSSH_PASSWORD = 'password'\nWORKERS = ['127.0.0.1']\nWORKER_CONFIGS = ['all']\nCB_CLUSTER_TAG = 'default'\nCLUSTER_IPS = ['172.23.105.54', '172.23.105.57', '172.23.105.62',\n '172.23.105.55']\n<mask token>\nREMOTE_SITES = {'remote1': {'RABBITMQ_IP': '172.23.105.99',\n 'CB_CLUSTER_TAG': 'default', 'COUCHBASE_IP': '172.23.105.58',\n 'COUCHBASE_PORT': '8091'}}\nLOGDIR = 'logs'\nENABLE_BACKUPS = False\nBACKUP_DIR = '/tmp/backup'\nBACKUP_NODE_IP = '127.0.0.1'\nBACKUP_NODE_SSH_USER = 'root'\nBACKUP_NODE_SSH_PWD = 'password'\n", "step-3": "#TODO: allow workers to pull this from cache\n\nRABBITMQ_IP = '172.23.105.82'\nOBJECT_CACHE_IP = \"172.23.105.69\"\nOBJECT_CACHE_PORT = \"11911\"\nSERIESLY_IP = ''\nCOUCHBASE_IP = '172.23.105.54'\nCOUCHBASE_PORT = '8091'\nCOUCHBASE_USER = \"Administrator\"\nCOUCHBASE_PWD = \"password\"\nSSH_USER = \"root\"\nSSH_PASSWORD = \"password\"\nWORKERS = ['127.0.0.1']\nWORKER_CONFIGS = [\"all\"]\nCB_CLUSTER_TAG = \"default\"\n\nCLUSTER_IPS = [\"172.23.105.54\", \"172.23.105.57\", \"172.23.105.62\", \"172.23.105.55\"]\n\n# xdcr config\n\"\"\"\n\" pointer information to remote sites\n\" remote1 = name for remote site\n\" RABBITMQ_IP = broker managing remote site (can be same as local broker if using different vhosts)\n\" this should equal RABBITMQ_IP of remote site\n\" CB_CLUSTER_TAG = represents vhost watched by workers remote site.\n\" this should equal CB_CLUSTER_TAG of remote site\n\" COUCHBASE_IP/PORT = IP/PORT of a couchbase node in remote site\n\"\"\"\nREMOTE_SITES = {\"remote1\" : {\"RABBITMQ_IP\" : \"172.23.105.99\",\n \"CB_CLUSTER_TAG\" : \"default\",\n \"COUCHBASE_IP\" : \"172.23.105.58\",\n \"COUCHBASE_PORT\" : \"8091\"}}\n\nLOGDIR=\"logs\" # relative to current dir\n\n\n#Backup Config\nENABLE_BACKUPS = False\nBACKUP_DIR = \"/tmp/backup\"\nBACKUP_NODE_IP = \"127.0.0.1\"\nBACKUP_NODE_SSH_USER = \"root\"\nBACKUP_NODE_SSH_PWD = \"password\"\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
#This is a file from CS50 Finance
from functools import wraps

from flask import redirect, render_template, session
from threading import Thread
from flask_mail import Message
from application import app, mail

ALLOWED_EXTENSIONS = {"png", "PNG", "jpg", "jpeg", "JPG", "JPEG"}


def login_required(f):
    """
    Decorate routes to require login.

    http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if session.get("user_id") is None:
            return redirect("/sign_in")
        return f(*args, **kwargs)
    return decorated_function


def allowed_file(filename):
    return '.' in filename and \
        filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS


# Send message function
def async_send_mail(applic, msg):
    with applic.app_context():
        mail.send(msg)


def send_mail(subject, recipient, template, **kwargs):
    msg = Message(subject, recipients=[recipient])
    msg.html = render_template(template, **kwargs)
    thr = Thread(target=async_send_mail, args=[app, msg])
    thr.start()
    return thr
normal
{ "blob_id": "1a4da621add157fa6d1f578370d64594b102eeb5", "index": 4245, "step-1": "<mask token>\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\n<mask token>\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n", "step-2": "<mask token>\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n", "step-3": "<mask token>\nALLOWED_EXTENSIONS = {'png', 'PNG', 'jpg', 'jpeg', 'JPG', 'JPEG'}\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n", "step-4": "from functools import wraps\nfrom flask import redirect, render_template, session\nfrom threading import Thread\nfrom flask_mail import Message\nfrom application import app, mail\nALLOWED_EXTENSIONS = {'png', 'PNG', 'jpg', 'jpeg', 'JPG', 'JPEG'}\n\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get('user_id') is None:\n return redirect('/sign_in')\n return f(*args, **kwargs)\n return decorated_function\n\n\ndef allowed_file(filename):\n return '.' 
in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr\n", "step-5": "#This is a file from CS50 Finance\nfrom functools import wraps\n\nfrom flask import redirect, render_template, session\nfrom threading import Thread\nfrom flask_mail import Message\nfrom application import app, mail\n\nALLOWED_EXTENSIONS = {\"png\", \"PNG\", \"jpg\", \"jpeg\", \"JPG\", \"JPEG\"}\n\ndef login_required(f):\n \"\"\"\n Decorate routes to require login.\n\n http://flask.pocoo.org/docs/1.0/patterns/viewdecorators/\n \"\"\"\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/sign_in\")\n return f(*args, **kwargs)\n return decorated_function\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n# Send message function\ndef async_send_mail(applic, msg):\n with applic.app_context():\n mail.send(msg)\n\ndef send_mail(subject, recipient, template, **kwargs):\n msg = Message(subject, recipients=[recipient])\n msg.html = render_template(template, **kwargs)\n thr = Thread(target=async_send_mail, args=[app, msg])\n thr.start()\n return thr", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
""" Created on Feb 10, 2013 @author: jens Deprecated module for crystallogrphy related geometry operations. And a lot of other stuff that I put here. """ import numpy as np atomtable = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, 'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36} covalence_radius = {'H': .37, 'He': .0, 'Li': 1.23, 'Be': .90, 'B': .80, 'C': .77, 'N': .74, 'O': .71, 'F': .72, 'Ne': 0., 'Na': 1.54, 'Mg': 1.36, 'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': .99, 'Ar': 0., 'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22, 'Cr': 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16, 'Ni': 1.15, 'Cu': 1.17, 'Zn': 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.20, 'Se': 1.16, 'Br': 1.14, 'Kr': 0., 'Rb': 2.18} # , 191, 162, 145, 134, 130, 127, 125, 125, 128, 134, 148, 144, 141, 140, 136, 133, 0, 235, 198, 169, 165, 165, 164, 164, 162, 185, 161, 159, 159, 157, 157, 156, 170, 156, 144, 134, 130, 128, 126, 127, 130, 134, 149, 148, 147, 146, 146, 145, 0, 0, 0, 188, 165, 161, 142, 130, 151, 182, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} electro_negativ = {'H': 2.20, 'He': 5.50, 'Li': .97, 'Be': 1.47, 'B': 2.01, 'C': 2.50, 'N': 3.07, 'O': 3.50, 'F': 4.40, 'Ne': 4.80, 'Na': 1.01, 'Mg': 1.23, 'Al': 1.47, 'Si': 1.74, 'P': 2.06, 'S': 2.44, 'Cl': 2.83, 'Ar': 3.20, 'K': .91, 'Ca': 1.04, 'Sc': 1.20, 'Ti': 1.32, 'V': 1.45, 'Cr': 1.56, 'Mn': 1.60, 'Fe': 1.64, 'Co': 1.70, 'Ni': 1.75, 'Cu': 1.75, 'Zn': 1.66, 'Ga': 1.82, 'Ge': 2.02, 'As': 2.20, 'Se': 2.48, 'Br': 2.74, 'Kr': 2.90, 'Rb': .89} # , 99, 111, 122, 123, 130, 136, 142, 145, 130, 142, 146, 149, 172, 182, 201, 221, 240, 86, 97, 108, 108, 107, 107, 107, 107, 110, 111, 110, 110, 110, 111, 111, 106, 114, 123, 133, 140, 146, 152, 155, 142, 142, 144, 144, 155, 167 } proton_number = {'H': '001', 'He': '002', 'Li': '003', 'Be': '004', 'B': '005', 'C': '006', 'N': '007', 'O': '008', 'F': '009', 'Ne': '010', 'Na': '011', 'Mg': '012', 'Al': '013', 'Si': '014', 'P': '015', 'S': '016', 'Cl': '017', 'Ar': '018', 'K': '019', 'Ca': '020', 'Sc': '021', 'Ti': '022', 'V': '023', 'Cr': '024', 'Mn': '025', 'Fe': '026', 'Co': '027', 'Ni': '028', 'Cu': '029', 'Zn': '030', 'Ga': '031', 'Ge': '032', 'As': '033', 'Se': '034', 'Br': '035', 'Kr': '036'} number_proton = dict([[v, k] for k, v in proton_number.items()]) priority = {'3': '5', '2': '4', '1.5': '3', '6': '2', '5': '1', '1': '0'} def frac2cart(coords, matrix): coords = np.dot(matrix, coords).flatten().tolist()[0] return coords def xd_element(name): """ Return the element of an atom as defined in it's label. """ try: name = name[:2] except: pass try: covalence_radius[name] except: name = name[0] return name def Uiso(adp, mean='geometric'): try: adp = get_adp_as_matrix(adp) eigvals = np.linalg.eigvals(adp) if mean == 'geometric': return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (1. / 3.) elif mean == 'arithmetic': return sum(eigvals) / 3. 
else: print('crystgeom: Error: please specify mean as \'geometric\' or \'arithmetic\'') exit() except: return adp def get_adp_as_matrix(adp): if adp is None: return None return np.matrix([[adp[0], adp[3], adp[4]], [adp[3], adp[1], adp[5]], [adp[4], adp[5], adp[2]]]) def get_compound_properties(path): """ Reads a *.FChk file and returns a list containing the charge of the compound, the number of electrons in the compound, the overall lengths of the dipole moment vector and the total HF energy. """ filepointer = open(path) charge = None NE = None E_HF = None dipole = None read_dipole = False for line in filepointer: if read_dipole: read_dipole = False dipole = [float(value) for value in line.split(' ') if '.' in value] dipole = np.linalg.norm(dipole) elif 'Charge' in line and not charge: charge = line.split(' ')[-1].rstrip('\n') elif 'Number of electrons' in line and not NE: NE = line.split(' ')[-1].rstrip('\n') elif 'Total Energy' in line and not E_HF: E_HF = line.split(' ')[-1].rstrip('\n') elif 'Dipole Moment' in line and not dipole: read_dipole = True if charge and NE and E_HF and dipole: break return [charge, NE, dipole, E_HF] def center_molecule(atom_coords): center = get_geom_center(atom_coords) atom_coords = move_center_to_point(atom_coords, center) return atom_coords def get_pair_list(atom_elements_1, atom_coords_1, atom_elements_2, atom_coords_2): pair_list = [] for i in xrange(len(atom_coords_1)): best_hit = (9, None) for j in xrange(len(atom_coords_2)): dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j]) if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]: best_hit = (dist, j) pair_list.append(best_hit[1]) # =========================================================================== # print # for i in xrange(len(pair_list)): # print atom_atoms_1[i],atom_atoms_2[pair_list[i]] #=========================================================================== return pair_list def bond_order(bondxi, threshold_single_meso=0.0847, # ================================================================ # threshold_meso_double=0.184, #================================================================ threshold_meso_double=0.0847, threshold_double_triple=0.27): """ Returns the bond order between two atoms. """ if bondxi < threshold_single_meso: order = '1' elif bondxi < threshold_meso_double: order = '1.5' elif bondxi < threshold_double_triple: order = '2' else: order = '3' return order # =============================================================================== # def rotate_3D_symmetric(atom,source_atom): # ''' # Rotates the ADP of 'atom' to match the orientation # of 'source_atom. # ''' # cosangle=np.dot(atom.orientation[0],source_atom.orientation[0]) # angle=np.arccos(cosangle) # axis=np.cross(atom.orientation[0],source_atom.orientation[0]) # axis=axis/np.linalg.norm(axis) # matrix=get_3drotation_matrix(axis,angle) # orientation0_new=np.dot(source_atom.orientation[0],matrix) # if np.linalg.norm(orientation0_new-atom.orientation[0])<0.00001: # pass # else: # angle=angle*-1 # matrix=get_3drotation_matrix(axis,angle) # # atom.adp['cart_int']=rotate_adp(source_atom.adp['cart_int'],matrix) #=============================================================================== def rotate_3D(atom, source_atom): """ Rotates the ADP of 'atom' to match the orientation of 'source_atom. 
""" from lauescript.cryst.match import get_transform lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]] lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]] matrix = get_transform(lst1, lst2, matrix=True) adp = source_atom.adp['cart_int'] atom.adp['cart_int'] = rotate_adp(adp, matrix) def xi(element1, element2, distance): """ Calculates the bond distinguishing parameter Xi. """ return (float(covalence_radius[element1]) + float(covalence_radius[element2]) - (0.08 * float(abs(electro_negativ[element1] - electro_negativ[element2]))) - distance) def get_orientation_vector(atom1, atom2): v = atom1.cart - atom2.cart return v / np.linalg.norm(v) def framework_crawler(atom, direction, rigid_group_old=None): """ Function to identify atoms belonging to a previosly defined rigid group. Arguments: atom: the name of the first atom of the rigid group. direction: the name of the second atom of the rigid group. rigid_group_old: used by the function itself for consecutive calls. Returns a list of atom names belonging to the rigid group. """ if not rigid_group_old: rigid_group = [atom, direction] else: rigid_group = rigid_group_old for atom in get_framework_neighbours(direction): if not atom in rigid_group and not atom.element == 'H': rigid_group.append(atom) framework_crawler(rigid_group[0], atom, rigid_group) if not rigid_group_old: #======================================================================= # print ' Determined rigid group:', [i.name for i in rigid_group] #======================================================================= return rigid_group def get_closest_atom_of_element(element, atom, exclude=None): """ Returns the atom with the shortest distance to the given atom. """ for atom2 in atom.partner: if (element == atom2.element or not element) and not atom2 == exclude: return atom2 def get_atom_with_longest_bond(element, atom): hit = None for atom2 in atom.partner: if element in atom2.name: if np.linalg.norm(atom.cart - atom2.cart) < 1.8: hit = atom2 else: break return hit def get_framework_neighbours(atom, useH=True): """ Needs a ATOM.atom instance as argument. Returns the names of the framework atoms bound to that atom. """ neighbourlist = [] for atom2 in atom.partner[:5]: #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6: if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float( covalence_radius[atom2.element]) + .1: if not 'H' == atom2.element or useH: neighbourlist.append(atom2) return neighbourlist #=============================================================================== # def get_framework_neighbours(atom,useH=True): # """ # Needs a classes.atom instance as argument. # Returns the names of the framework atoms bound to that atom. # """ # neighbourlist=[] # for atom2 in atom.partner[atom.molecule.name][1:5]: # #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6: # if np.linalg.norm(atom.cart-atom2.cart)<=1.6: # if not 'H(' in atom2.name or useH: # neighbourlist.append(atom2) # return neighbourlist #=============================================================================== def read_meas_adp(data, path='xd.res', use='meas'): """ Reads the measured ADP from the xd.res file. 
The parameters are stored in atom.adp['frac_meas'] and atom.adp['cart_meas'] """ use2 = 'frac_' + use switch = False filepointer = open(path, 'r') atomname = None for line in filepointer: if switch: split = [i for i in line.split(' ') if len(i) > 0] if not len(split) == 6: print('WARNING!!! Inconsistend number of floats while\ reading measured ADP.') data['exp'][atomname].adp[use2] = split switch = False if '(' in line: split = [i for i in line.split(' ') if len(i) > 0] if split[0][-1] == ')': switch = True atomname = split[0] use = 'cart_' + use for atom in data['exp'].atoms: # if use == 'cart_neut': print(atom) atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.frac2cartmatrix, atom.molecule.cell) return data def reflect_adp(adp, planev): """ Returns the ADP after reflection on the plane defined by its normal vector 'planev'. """ M = np.identity(4) M[:3, :3] -= 2.0 * np.outer(planev, planev) M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev return rotate_adp(adp, M[:3, :3]) def eigenv2tensor(axis): """ Calculates the tensor representation of ADP from its priciple axis. """ vec = np.ones((3, 3)) vecval = np.ones((3, 3)) for i in xrange(len(axis)): vmag = np.linalg.norm(axis[i]) v = axis[i] / vmag #print v vec[:, i] = v vecval[:, i] = axis[i] adp = np.linalg.solve(vec, vecval) return adp def get_adp_from_calc(vx, vy, vz): """ Calculates an ADP in its matrix representation from the three principle axis representing the displacement ellipsoid. The three principle axis of the ellipsoid are needed as arguments. A Matrix representation of the ADP is returned. """ ## lx=np.linalg.norm(vx) ## ly=np.linalg.norm(vy) ## lz=np.linalg.norm(vz) lx = vx ly = vy lz = vz L = np.matrix([[lx, 0, 0], [0, ly, 0], [0, 0, lz]]) ## Vx=vx/lx ## Vy=vy/ly ## Vz=vz/lz Vx = np.array([1, 0, 0]) Vy = np.array([0, 1, 0]) Vz = np.array([0, 0, 1]) V = np.matrix([[Vx[0], Vy[0], Vz[0]], [Vx[1], Vy[1], Vz[1]], [Vx[2], Vy[2], Vz[2]]]) Vinv = np.linalg.inv(V) #print V,Vinv M = np.dot(np.dot(Vinv, L), V) #print M return M #=============================================================================== # # # def get_general_distances(coordlist1,coordlist2,atomlist1,atomlist2): # """ # Calculates a distance dictionary between two sets of atoms. # Returns a dictionary entry for every atom in atomlist1 with the inter atom # distances and the corresponding atom name keyed to their atom type. # # This function is used by the get_best_point() function. # """ # maindict={} # for i in xrange(len(atomlist1)): # distdict={} # for j in xrange(len(atomlist2)): # if not atomlist2[j][0] in distdict.keys(): # distdict[atomlist2[j][0]]=[[np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]]] # else: # distdict[atomlist2[j][0]].append([np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]]) # ## print atomlist1[i],'aaaaaaaaaaa' # maindict[atomlist1[i]]=distdict # return maindict #=============================================================================== def get_best_quaternion(coordlist1, coordlist2): """ Determines the the quaternion representing the best possible transformation of two coordinate systems into each other using a least sqare approach. This function is used by the get_refined_rotation() function. 
""" M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]]) if len(coordlist1) <= len(coordlist2): number = len(coordlist1) else: number = len(coordlist2) for i in xrange(number): aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i])) M = M + aaa N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2]) N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2]) N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2]) N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2]) N12 = float(M[1][:, 2] - M[2][:, 1]) N13 = float(M[2][:, 0] - M[0][:, 2]) N14 = float(M[0][:, 1] - M[1][:, 0]) N21 = float(N12) N23 = float(M[0][:, 1] + M[1][:, 0]) N24 = float(M[2][:, 0] + M[0][:, 2]) N31 = float(N13) N32 = float(N23) N34 = float(M[1][:, 2] + M[2][:, 1]) N41 = float(N14) N42 = float(N24) N43 = float(N34) N = np.matrix([[N11, N12, N13, N14], [N21, N22, N23, N24], [N31, N32, N33, N34], [N41, N42, N43, N44]]) values, vectors = np.linalg.eig(N) w = list(values) quat = vectors[:, w.index(max(w))] quat = np.array(quat).reshape(-1, ).tolist() return quat, max(w) def get_rotation_matrix_from_quaternion(q): """ Returns the rotation matrix equivalent of the given quaternion. This function is used by the get_refined_rotation() function. """ R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], 2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])], [2 * (q[2] * q[1] + q[0] * q[3]), q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3], 2 * (q[2] * q[3] - q[0] * q[1])], [2 * (q[3] * q[1] - q[0] * q[2]), 2 * (q[3] * q[2] + q[0] * q[1]), q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]]) return R def get_geom_center(coordlist): """ Calculates the geometrical center of a set of points. """ return sum(coordlist) / len(coordlist) def move_center_to_point(atomlist, point): """ Moves the geometrical center of the atoms in atomlist to the given point. """ for atom in range(len(atomlist)): atomlist[atom] = atomlist[atom] - point return atomlist def rotate_adp_reverse(adp, rotmat): """ Rotates the adp with its corresponding rotation matrix. """ adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5]), float(adp[2])]]) rotmatT = np.transpose(rotmat) adp = np.dot(rotmat, adp) adp = np.dot(adp, rotmatT) adp = np.array(adp).flatten().tolist() return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]] def rotate_adp(adp, rotmat): """ Rotates the adp with its corresponding rotation matrix. """ adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5]), float(adp[2])]]) rotmatT = np.transpose(rotmat) adp = np.dot(rotmatT, adp) adp = np.dot(adp, rotmat) # print '=\n',adp,'\n-------------------------------------------------\n\n\n\n\n\n' adp = np.array(adp).flatten().tolist() return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]] def rotate_adp2(adp, rotmat, cell): """ Rotates the adp with its corresponding rotation matrix. 
""" adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5]), float(adp[2])]]) rotmat = np.linalg.inv(rotmat) rotmatT = np.transpose(rotmat) Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 / cell[2]]]) Nmat = np.linalg.inv(Nmat) NmatT = np.transpose(Nmat) adp = np.dot(rotmat, adp) adp = np.dot(adp, rotmatT) adp = np.dot(Nmat, adp) adp = np.dot(adp, NmatT) adp = np.array(adp).flatten().tolist() return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]] def rotate_adp3(adp, rotmat, cell): """ Rotates the adp with its corresponding rotation matrix. """ adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5]), float(adp[2])]]) rotmati = np.matrix(rotmat) rotmatiT = np.transpose(rotmati) rotmat = np.linalg.inv(rotmat) Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 / cell[2]]]) Nmat = np.linalg.inv(Nmat) NmatT = np.transpose(Nmat) adp = np.dot(rotmati, adp) adp = np.dot(adp, rotmatiT) adp = np.dot(Nmat, adp) adp = np.dot(adp, NmatT) adp = np.array(adp).flatten().tolist() return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]] def rotate_list_by(coordlist, R): """ Returns a list of coordinates where every position is rotated by the the rotation matrix 'R'. """ for coord in xrange(len(coordlist)): value = np.dot(R, coordlist[coord]) value = np.array(value).reshape(-1, ).tolist() coordlist[coord] = value return coordlist def write_xyz(coords, name): filepointer = open(name, 'w') filepointer.write(str(len(coords))) filepointer.write('\n' + name + '\n') for line in coords: filepointer.write('C ') for coord in line: filepointer.write(str(coord) + ' ') filepointer.write('\n') filepointer.close() def write_xyzqt(coords, name): filepointer = open(name, 'a') filepointer.write(name + '\n') for line in coords: filepointer.write('C ') for coord in line: filepointer.write(' ' + str(coord)) filepointer.write('\n') filepointer.close() def get_3drotation_matrix(axis, angle): """ Returns the rotation matrix that rotates a vector around the given axis by the given angle using the "Euler-Rodrigues formula". """ angle = angle #*-1 norm = np.linalg.norm(np.array(axis)) if norm > 0: axis /= norm ax, ay, az = axis[0], axis[1], axis[2] cos, sin = np.cos(angle), np.sin(angle) rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin], [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin], [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]]) return rotmat def get_normal_vector_of_plane(p1, p2, p3): """ Returns the normal vector of a plane defined by the points p1,p2 and p3. """ v12 = np.array(p1) - np.array(p2) v13 = np.array(p1) - np.array(p3) nvec = np.cross(v12, v13) ## print 'norm: '+str(np.linalg.norm(nvec)) return nvec / np.linalg.norm(nvec) def read_gaussian_coords(): atomlist = [] filepointer = open('g98.out', 'r') for line in filepointer.readlines(): if 'Distance' in line: break try: newline = [float(i) for i in line.split(' ') if len(i) > 0] newline = [newline[:2], np.array(newline[3:])] atomlist.append(newline) except: pass return atomlist def get_closest_neighbours(atomlist, neighbours=2): """ Returns a list where every element is a list of three atomnames. The second and third names are the closest neighbours of the first names. 
The argument is a list as returned by frac_to_cart and the number of neighbours to be returned. """ print('atomlist', atomlist) neighbourlist = [] for atom in atomlist: listline = [atom[0][0]] dists = [] distsc = [] for partner in atomlist: dists.append(np.linalg.norm(atom[1] - partner[1])) distsc.append(np.linalg.norm(atom[1] - partner[1])) dists.remove(min(dists)) for _ in range(neighbours): if min(dists) < 2.5: listline.append(atomlist[distsc.index(min(dists))][0][0]) dists.remove(min(dists)) #listline.append(atomlist[distsc.index(min(dists))][0][0]) neighbourlist.append(listline) return neighbourlist def calculate_distance_matrix(atomlist): """ Calculates for every atom the distances to all other atoms in atomlist. Returns a list where every element is a list of all distances. """ distlist = [] for atom in atomlist: atomdict = {} for partner in atomlist: if not str(int(partner[0][1])) in atomdict.keys(): atomdict[str(int(partner[0][1]))] = [] atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1])) else: atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1])) atomdict[str(int(partner[0][1]))].sort() distlist.append(atomdict) return distlist def link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys): """ The function is able to identify equal atoms of one molecule in different coordinate systems independent of the molecule's orientaion. """ hitlist = [] for atom in distlist1: atomtype = int(atomlist1[distlist1.index(atom)][0][1]) valuelist = [] for partner in distlist2: partnertype = int(atomlist2[distlist2.index(partner)][0][1]) if atomtype == partnertype: partnervalue = 0 keylist = partner.keys() for key in keylist: for element in xrange(len(atom[key])): partnervalue += abs(atom[key][element] - partner[key][element]) else: partnervalue = 9999999 valuelist.append(partnervalue) minvalue = min(valuelist) besthit = valuelist.index(minvalue) hitlist.append(besthit) def make_list_unique(seq, idfun=None): if idfun is None: def idfun(x): return x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result def get_influence_atoms(atomlist): """ Determines the atoms defining the chemical enviroment of a given atom by checking their bonding partners. Only the first and second neighbours are considered. """ enviromentlist = [] trunclist = [] neighbourlist = get_closest_neighbours(atomlist, 4) for neighbours in neighbourlist: if neighbours[0][0] == "H": neighbours = neighbours[:2] if neighbours[0][0] == "O": neighbours = neighbours[:3] trunclist.append(neighbours) for atom in trunclist: newatom = [] for atom1partner in atom[1:]: for partner in trunclist: if partner[0] == atom1partner: counter = 0 for atomi in partner: if atomi[0] == 'H': counter += 1 if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'): newatom += atom + partner[1:] newatom = make_list_unique(newatom) newatom.sort() enviromentlist.append(newatom) return enviromentlist def link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys): """ The function is able to identify equivalent atoms in different molecules in different coordinate systems independent of the molecule's orientaion. 
""" hitlist = [] for atom in distlist1: atomtype = int(atomlist1[distlist1.index(atom)][0][1]) valuelist = [] for partner in distlist2: partnertype = int(atomlist2[distlist2.index(partner)][0][1]) if atomtype == partnertype: partnervalue = 0 keylist = partner.keys() for key in keylist: for element in xrange(len(atom[key])): value = abs(atom[key][element] - partner[key][element]) partnervalue += value else: partnervalue = 9999999 valuelist.append(partnervalue) minvalue = min(valuelist) besthit = valuelist.index(minvalue) hitlist.append(besthit) def read_multiple_coordinates(fragmentnames): """ Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a dictionary where every returnvalue of frac_to_cart is keyed to its fragment name. """ fragdict = {} for name in fragmentnames: path = name + '/' cell, pos = read_coordinates(path) atomlist = frac_to_cart(cell, pos) atomdict = {} for atom in atomlist: atomdict[atom[0][0]] = atom[1] fragdict[name] = atomlist return fragdict ##def read_coordinates(path=''): ## """ ## Reads the cell parameters from a 'xd.mas' file and the atomic positions ## from a 'xd.res' file. ## The function returns a list with the cell parameters and an dictionary which ## keys the atom name to its fractional coordinates. ## """ ## maspointer=open(path+'xd.mas','r') ## respointer=open(path+'xd.res','r') ## positions={} ## keylist=[] #Needed to keep the atomlist order. This is important for the frequency read function. ## for line in maspointer.readlines(): ## if 'CELL' in line: ## cell=[float(i) for i in line.split(" ") if '.' in i] ## for line in respointer.readlines(): ## if '(' in line and not '!' in line: ## coords=[float(i) for i in line.split(" ") if '.' in i] ## coords=coords[:-1] ## key=line.split(" ")[0] ## keylist.append(key) ## positions[key]=coords ## sortkeylist=[] ## for i in xrange(len(keylist)): ## j=i+1 ## for key in keylist: ## if j==int(key[2:-1]): ## sortkeylist.append(key) ## return cell,positions,sortkeylist def read_xd_master_file(path, errorpointer): """ Returns the compound name and the cell parameters from a xd.mas style file specified by 'path'. """ filepointer = open(path, 'r') for line in filepointer.readlines(): if 'TITLE' in line: compound_name = line.partition('!')[2].lstrip().rstrip() if 'CELL' in line: cell = [float(i) for i in line.split(" ") if '.' in i] break filepointer.close() try: return compound_name, cell except: errorpointer.write(path + '\n') return None, None def read_xd_parameter_file(path, sort=False): respointer = open(path, 'r') positions = {} keylist = [] for line in respointer.readlines(): if '(' in line and not '!' in line: coords = [float(i) for i in line.split(" ") if '.' in i] coords = coords[:-1] key = line.split(" ")[0] keylist.append(key) positions[key] = coords if sort: sortkeylist = [] for i in xrange(len(keylist)): j = i + 1 for key in keylist: number = get_number(key) if j == int(number): sortkeylist.append(key) else: sortkeylist = keylist return positions, sortkeylist def read_coordinates(path='', sort=True): """ Reads the cell parameters from a 'xd.mas' file and the atomic positions from a 'xd.res' file. The function returns a list with the cell parameters and an dictionary which keys the atom name to its fractional coordinates. """ maspointer = open(path + 'xd.mas', 'r') respointer = open(path + 'xd.res', 'r') positions = {} keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function. 
for line in maspointer.readlines(): if 'CELL ' in line: cell = [float(i) for i in line.split(" ") if '.' in i] break for line in respointer.readlines(): if '(' in line and not '!' in line: coords = [float(i) for i in line.split(" ") if '.' in i] coords = coords[:-1] key = line.split(" ")[0] keylist.append(key) positions[key] = coords if sort: sortkeylist = [] for i in xrange(len(keylist)): j = i + 1 for key in keylist: number = get_number(key) if j == int(number): sortkeylist.append(key) else: sortkeylist = keylist return cell, positions, sortkeylist def get_number(atomname): """ Returns the number in the brackets of an atomname. """ switch = False number = '' for char in atomname: if char == ')': switch = False if switch: number += char if char == '(': switch = True return number def frac_to_cart(cell, positions): """ Transforms a set of given fractional coordinates to cartesian coordinates. Needs a list containing the cell parameters as its first argument and the dictionary returned by read coordinates(). Returns a dictionary with cartesian coordinates analog to fractional dictionary. """ atomlist = [] counter = 1 a, b, c = cell[0], cell[1], cell[2] alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \ + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma)) transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)], [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)], [0, 0, c * v / np.sin(gamma)]]) for atom in positions: coordmatrix = np.dot(transmatrix, positions[str(atom)]) coordmatrix = np.array(coordmatrix).flatten().tolist() atomlist.append([]) atomlist[-1].append([atom, atomtable[atom[0]]]) counter += 1 atomlist[-1].append(np.array(coordmatrix)) return atomlist def list_to_dict(atomlist, full=False): """ Keys the coordinates of the atoms read from xd.res to the numerical part of its name. """ atomdict = {} if full: for atom in atomlist: atomdict[atom[0]] = atom[1] else: for atom in atomlist: atomdict[atom[0][0]] = atom[1] return atomdict #=============================================================================== # def link_atoms(gatomlist,xatomdict): # """ # Returns a list of pairs of equivalten atoms. # """ # linklist=[] # keylist=xatomdict.keys() # for atom in xrange(len(gatomlist)): # for key in keylist: # if int(key)==atom+1: # linklistline=[atomlist[atom][1],xatomdict[key]] # linklist.append(linklistline) # break # return linklist #=============================================================================== #=============================================================================== # def get_random_plane(linklist): # """ # Randomly picks three atoms to build a plane from. # """ # planepoints=random.sample(linklist,3) # gplanenorm=get_normal_vector_of_plane(planepoints[0][0],planepoints[1][0],planepoints[2][0]) # gplanedir=np.linalg.norm(planepoints[0][0]-planepoints[1][0]) # xplanenorm=get_normal_vector_of_plane(planepoints[0][1],planepoints[1][1],planepoints[2][1]) # xdplanedir=np.linalg.norm(planepoints[0][1]-planepoints[1][1]) # return gplanenorm,xplanenorm #=============================================================================== def get_angle(v1, v2): """ Returns the angle between two vectors. 
""" return np.arccos(np.dot(v1, v2)) def read_invout_database(path): path += 'Invariome.out' filepointer = open(path, 'r') invnames = {} for line in filepointer.readlines(): splitted = line.split(' ') invnames[splitted[0][:-1]] = splitted[1][:-1] return invnames
every returnvalue of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\ndef read_xd_master_file(path, errorpointer):\n \"\"\"\n Returns the compound name and the cell parameters from a xd.mas style\n file specified by 'path'.\n \"\"\"\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(' ') if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.' in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return positions, sortkeylist\n\n\ndef read_coordinates(path='', sort=True):\n \"\"\"\n Reads the cell parameters from a 'xd.mas' file and the atomic positions\n from a 'xd.res' file.\n The function returns a list with the cell parameters and an dictionary which\n keys the atom name to its fractional coordinates.\n \"\"\"\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n positions = {}\n keylist = []\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(' ') if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.' 
in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\n<mask token>\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of its name.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n return atomdict\n\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle between two vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\ndef read_invout_database(path):\n path += 'Invariome.out'\n filepointer = open(path, 'r')\n invnames = {}\n for line in filepointer.readlines():\n splitted = line.split(' ')\n invnames[splitted[0][:-1]] = splitted[1][:-1]\n return invnames\n", "step-4": "<mask token>\nimport numpy as np\natomtable = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O':\n 8, 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15,\n 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V':\n 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, 'Zn': \n 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36}\ncovalence_radius = {'H': 0.37, 'He': 0.0, 'Li': 1.23, 'Be': 0.9, 'B': 0.8,\n 'C': 0.77, 'N': 0.74, 'O': 0.71, 'F': 0.72, 'Ne': 0.0, 'Na': 1.54, 'Mg':\n 1.36, 'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': 0.99, 'Ar': \n 0.0, 'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22, 'Cr': \n 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16, 'Ni': 1.15, 'Cu': 1.17, 'Zn':\n 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.2, 'Se': 1.16, 'Br': 1.14, 'Kr': \n 0.0, 'Rb': 2.18}\nelectro_negativ = {'H': 2.2, 'He': 5.5, 'Li': 0.97, 'Be': 1.47, 'B': 2.01,\n 'C': 2.5, 'N': 3.07, 'O': 3.5, 'F': 4.4, 'Ne': 4.8, 'Na': 1.01, 'Mg': \n 1.23, 'Al': 1.47, 'Si': 1.74, 'P': 2.06, 'S': 2.44, 'Cl': 2.83, 'Ar': \n 3.2, 'K': 0.91, 'Ca': 1.04, 'Sc': 1.2, 'Ti': 1.32, 'V': 1.45, 'Cr': \n 1.56, 'Mn': 1.6, 'Fe': 1.64, 'Co': 1.7, 'Ni': 1.75, 'Cu': 1.75, 'Zn': \n 1.66, 'Ga': 1.82, 'Ge': 2.02, 'As': 2.2, 'Se': 2.48, 'Br': 2.74, 'Kr': \n 2.9, 'Rb': 0.89}\nproton_number = {'H': '001', 'He': '002', 'Li': '003', 'Be': '004', 'B':\n '005', 'C': '006', 'N': '007', 'O': '008', 'F': '009', 'Ne': '010',\n 'Na': '011', 'Mg': '012', 'Al': '013', 'Si': '014', 'P': '015', 'S':\n '016', 'Cl': '017', 'Ar': '018', 'K': '019', 'Ca': '020', 'Sc': '021',\n 'Ti': '022', 'V': '023', 'Cr': '024', 'Mn': '025', 'Fe': '026', 'Co':\n '027', 'Ni': '028', 'Cu': '029', 'Zn': '030', 'Ga': '031', 'Ge': '032',\n 'As': '033', 'Se': '034', 'Br': '035', 'Kr': '036'}\nnumber_proton = dict([[v, k] for k, v in proton_number.items()])\npriority = {'3': '5', '2': '4', '1.5': '3', '6': '2', '5': '1', '1': '0'}\n\n\ndef frac2cart(coords, matrix):\n coords = np.dot(matrix, coords).flatten().tolist()[0]\n return coords\n\n\ndef xd_element(name):\n \"\"\"\n Return the element of an atom as defined in it's label.\n \"\"\"\n try:\n name = name[:2]\n except:\n pass\n try:\n 
covalence_radius[name]\n except:\n name = name[0]\n return name\n\n\ndef Uiso(adp, mean='geometric'):\n try:\n adp = get_adp_as_matrix(adp)\n eigvals = np.linalg.eigvals(adp)\n if mean == 'geometric':\n return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (\n 1.0 / 3.0)\n elif mean == 'arithmetic':\n return sum(eigvals) / 3.0\n else:\n print(\n \"crystgeom: Error: please specify mean as 'geometric' or 'arithmetic'\"\n )\n exit()\n except:\n return adp\n\n\ndef get_adp_as_matrix(adp):\n if adp is None:\n return None\n return np.matrix([[adp[0], adp[3], adp[4]], [adp[3], adp[1], adp[5]], [\n adp[4], adp[5], adp[2]]])\n\n\ndef get_compound_properties(path):\n \"\"\"\n Reads a *.FChk file and returns a list containing the charge of\n the compound, the number of electrons in the compound, the overall\n lengths of the dipole moment vector and the total HF energy.\n \"\"\"\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value\n ]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]\n\n\ndef center_molecule(atom_coords):\n center = get_geom_center(atom_coords)\n atom_coords = move_center_to_point(atom_coords, center)\n return atom_coords\n\n\ndef get_pair_list(atom_elements_1, atom_coords_1, atom_elements_2,\n atom_coords_2):\n pair_list = []\n for i in xrange(len(atom_coords_1)):\n best_hit = 9, None\n for j in xrange(len(atom_coords_2)):\n dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])\n if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:\n best_hit = dist, j\n pair_list.append(best_hit[1])\n return pair_list\n\n\ndef bond_order(bondxi, threshold_single_meso=0.0847, threshold_meso_double=\n 0.0847, threshold_double_triple=0.27):\n \"\"\"\n Returns the bond order between two atoms.\n \"\"\"\n if bondxi < threshold_single_meso:\n order = '1'\n elif bondxi < threshold_meso_double:\n order = '1.5'\n elif bondxi < threshold_double_triple:\n order = '2'\n else:\n order = '3'\n return order\n\n\ndef rotate_3D(atom, source_atom):\n \"\"\"\n Rotates the ADP of 'atom' to match the orientation\n of 'source_atom.\n \"\"\"\n from lauescript.cryst.match import get_transform\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.\n orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n matrix = get_transform(lst1, lst2, matrix=True)\n adp = source_atom.adp['cart_int']\n atom.adp['cart_int'] = rotate_adp(adp, matrix)\n\n\ndef xi(element1, element2, distance):\n \"\"\"\n Calculates the bond distinguishing parameter Xi.\n \"\"\"\n return float(covalence_radius[element1]) + float(covalence_radius[element2]\n ) - 0.08 * float(abs(electro_negativ[element1] - electro_negativ[\n element2])) - distance\n\n\ndef get_orientation_vector(atom1, atom2):\n v = atom1.cart - atom2.cart\n return v / np.linalg.norm(v)\n\n\ndef framework_crawler(atom, direction, rigid_group_old=None):\n \"\"\"\n Function to identify atoms belonging to a previosly defined rigid\n 
group.\n Arguments:\n atom: the name of the first atom of the rigid group.\n direction: the name of the second atom of the rigid group.\n rigid_group_old: used by the function itself for consecutive calls.\n\n Returns a list of atom names belonging to the rigid group.\n \"\"\"\n if not rigid_group_old:\n rigid_group = [atom, direction]\n else:\n rigid_group = rigid_group_old\n for atom in get_framework_neighbours(direction):\n if not atom in rigid_group and not atom.element == 'H':\n rigid_group.append(atom)\n framework_crawler(rigid_group[0], atom, rigid_group)\n if not rigid_group_old:\n return rigid_group\n\n\ndef get_closest_atom_of_element(element, atom, exclude=None):\n \"\"\"\n Returns the atom with the shortest distance to the given atom.\n \"\"\"\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2\n\n\ndef get_atom_with_longest_bond(element, atom):\n hit = None\n for atom2 in atom.partner:\n if element in atom2.name:\n if np.linalg.norm(atom.cart - atom2.cart) < 1.8:\n hit = atom2\n else:\n break\n return hit\n\n\ndef get_framework_neighbours(atom, useH=True):\n \"\"\"\n Needs a ATOM.atom instance as argument.\n Returns the names of the framework atoms bound to that atom.\n \"\"\"\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius\n [atom.element]) + float(covalence_radius[atom2.element]) + 0.1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist\n\n\ndef read_meas_adp(data, path='xd.res', use='meas'):\n \"\"\"\n Reads the measured ADP from the xd.res file.\n The parameters are stored in atom.adp['frac_meas'] and\n atom.adp['cart_meas']\n \"\"\"\n use2 = 'frac_' + use\n switch = False\n filepointer = open(path, 'r')\n atomname = None\n for line in filepointer:\n if switch:\n split = [i for i in line.split(' ') if len(i) > 0]\n if not len(split) == 6:\n print(\n 'WARNING!!! 
Inconsistend number of floats while reading measured ADP.'\n )\n data['exp'][atomname].adp[use2] = split\n switch = False\n if '(' in line:\n split = [i for i in line.split(' ') if len(i) > 0]\n if split[0][-1] == ')':\n switch = True\n atomname = split[0]\n use = 'cart_' + use\n for atom in data['exp'].atoms:\n atom.adp[use] = rotate_adp2(atom.adp[use2], atom.molecule.\n frac2cartmatrix, atom.molecule.cell)\n return data\n\n\ndef reflect_adp(adp, planev):\n \"\"\"\n Returns the ADP after reflection on the plane defined by its normal\n vector 'planev'.\n \"\"\"\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = 2.0 * np.dot(np.array([0, 0, 0]), planev) * planev\n return rotate_adp(adp, M[:3, :3])\n\n\ndef eigenv2tensor(axis):\n \"\"\"\n Calculates the tensor representation of ADP from its priciple axis.\n \"\"\"\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp\n\n\ndef get_adp_from_calc(vx, vy, vz):\n \"\"\"\n Calculates an ADP in its matrix representation from the three\n principle axis representing the displacement ellipsoid.\n\n The three principle axis of the ellipsoid are needed as arguments.\n A Matrix representation of the ADP is returned.\n \"\"\"\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0], [0, ly, 0], [0, 0, lz]])\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]], [Vx[1], Vy[1], Vz[1]], [Vx[2], Vy\n [2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n M = np.dot(np.dot(Vinv, L), V)\n return M\n\n\ndef get_best_quaternion(coordlist1, coordlist2):\n \"\"\"\n Determines the the quaternion representing the best possible\n transformation of two coordinate systems into each other using\n a least sqare approach.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n N = np.matrix([[N11, N12, N13, N14], [N21, N22, N23, N24], [N31, N32,\n N33, N34], [N41, N42, N43, N44]])\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1).tolist()\n return quat, max(w)\n\n\ndef get_rotation_matrix_from_quaternion(q):\n \"\"\"\n Returns the rotation matrix equivalent of the given quaternion.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3], \n 2 * (q[1] * q[2] - q[0] * q[3]), 2 * (q[1] * q[3] + q[0] * q[2])],\n [2 * (q[2] * q[1] + q[0] * q[3]), q[0] * q[0] - q[1] * q[1] + q[2] *\n q[2] - q[3] * q[3], 2 * (q[2] * q[3] - q[0] * q[1])], [2 * 
(q[3] *\n q[1] - q[0] * q[2]), 2 * (q[3] * q[2] + q[0] * q[1]), q[0] * q[0] -\n q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])\n return R\n\n\ndef get_geom_center(coordlist):\n \"\"\"\n Calculates the geometrical center of a set of points.\n \"\"\"\n return sum(coordlist) / len(coordlist)\n\n\ndef move_center_to_point(atomlist, point):\n \"\"\"\n Moves the geometrical center of the atoms in atomlist to the given point.\n \"\"\"\n for atom in range(len(atomlist)):\n atomlist[atom] = atomlist[atom] - point\n return atomlist\n\n\ndef rotate_adp_reverse(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, adp)\n adp = np.dot(adp, rotmat)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp2(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp3(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])], [float(\n adp[3]), float(adp[1]), float(adp[5])], [float(adp[4]), float(adp[5\n ]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0], [0, 1 / cell[1], 0], [0, 0, 1 /\n cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_list_by(coordlist, R):\n \"\"\"\n Returns a list of coordinates where every position is rotated by\n the the rotation matrix 'R'.\n \"\"\"\n for coord in xrange(len(coordlist)):\n value = np.dot(R, coordlist[coord])\n value = np.array(value).reshape(-1).tolist()\n coordlist[coord] = value\n return coordlist\n\n\ndef write_xyz(coords, name):\n filepointer = open(name, 'w')\n filepointer.write(str(len(coords)))\n filepointer.write('\\n' + name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(str(coord) + ' ')\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef 
write_xyzqt(coords, name):\n filepointer = open(name, 'a')\n filepointer.write(name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(' ' + str(coord))\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef get_3drotation_matrix(axis, angle):\n \"\"\"\n Returns the rotation matrix that rotates a vector around the given axis\n by the given angle using the \"Euler-Rodrigues formula\".\n \"\"\"\n angle = angle\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) -\n az * sin, ax * az * (1 - cos) + ay * sin], [ay * ax * (1 - cos) +\n az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax *\n sin], [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax *\n sin, cos + az * az * (1 - cos)]])\n return rotmat\n\n\ndef get_normal_vector_of_plane(p1, p2, p3):\n \"\"\"\n Returns the normal vector of a plane defined by the points p1,p2 and p3.\n \"\"\"\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n return nvec / np.linalg.norm(nvec)\n\n\ndef read_gaussian_coords():\n atomlist = []\n filepointer = open('g98.out', 'r')\n for line in filepointer.readlines():\n if 'Distance' in line:\n break\n try:\n newline = [float(i) for i in line.split(' ') if len(i) > 0]\n newline = [newline[:2], np.array(newline[3:])]\n atomlist.append(newline)\n except:\n pass\n return atomlist\n\n\ndef get_closest_neighbours(atomlist, neighbours=2):\n \"\"\"\n Returns a list where every element is a list of three atomnames. The second and third\n names are the closest neighbours of the first names.\n The argument is a list as returned by frac_to_cart and the number of neighbours to be\n returned.\n \"\"\"\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n neighbourlist.append(listline)\n return neighbourlist\n\n\ndef calculate_distance_matrix(atomlist):\n \"\"\"\n Calculates for every atom the distances to all other atoms\n in atomlist.\n Returns a list where every element is a list of all distances.\n \"\"\"\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(\n atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(\n atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n distlist.append(atomdict)\n return distlist\n\n\ndef link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equal atoms of one molecule in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n 
for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[\n key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\ndef make_list_unique(seq, idfun=None):\n if idfun is None:\n\n def idfun(x):\n return x\n seen = {}\n result = []\n for item in seq:\n marker = idfun(item)\n if marker in seen:\n continue\n seen[marker] = 1\n result.append(item)\n return result\n\n\ndef get_influence_atoms(atomlist):\n \"\"\"\n Determines the atoms defining the chemical enviroment of a given atom by checking\n their bonding partners. Only the first and second neighbours are considered.\n \"\"\"\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == 'H':\n neighbours = neighbours[:2]\n if neighbours[0][0] == 'O':\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n if counter < 2 or partner[0] in atom and atom[0][0] == 'H':\n newatom += atom + partner[1:]\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist\n\n\ndef link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2,\n keys):\n \"\"\"\n The function is able to identify equivalent atoms in different molecules in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\ndef read_multiple_coordinates(fragmentnames):\n \"\"\"\n Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a\n dictionary where every returnvalue of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\ndef read_xd_master_file(path, errorpointer):\n \"\"\"\n Returns the compound name and the cell parameters from a xd.mas style\n file specified by 'path'.\n \"\"\"\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(' ') if '.' in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.' 
in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return positions, sortkeylist\n\n\ndef read_coordinates(path='', sort=True):\n \"\"\"\n Reads the cell parameters from a 'xd.mas' file and the atomic positions\n from a 'xd.res' file.\n The function returns a list with the cell parameters and an dictionary which\n keys the atom name to its fractional coordinates.\n \"\"\"\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n positions = {}\n keylist = []\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(' ') if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(' ') if '.' in i]\n coords = coords[:-1]\n key = line.split(' ')[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\ndef frac_to_cart(cell, positions):\n \"\"\"\n Transforms a set of given fractional coordinates to cartesian coordinates.\n Needs a list containing the cell parameters as its first argument and the dictionary\n returned by read coordinates().\n Returns a dictionary with cartesian coordinates analog to fractional dictionary.\n \"\"\"\n atomlist = []\n counter = 1\n a, b, c = cell[0], cell[1], cell[2]\n alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5\n ] / 180 * np.pi\n v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(\n beta) - np.cos(gamma) * np.cos(gamma) + 2 * np.cos(alpha) * np.cos(\n beta) * np.cos(gamma))\n transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)], [0, \n b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma\n )) / np.sin(gamma)], [0, 0, c * v / np.sin(gamma)]])\n for atom in positions:\n coordmatrix = np.dot(transmatrix, positions[str(atom)])\n coordmatrix = np.array(coordmatrix).flatten().tolist()\n atomlist.append([])\n atomlist[-1].append([atom, atomtable[atom[0]]])\n counter += 1\n atomlist[-1].append(np.array(coordmatrix))\n return atomlist\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of its name.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n return atomdict\n\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle between two vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\ndef read_invout_database(path):\n path += 'Invariome.out'\n filepointer = open(path, 'r')\n invnames = {}\n for line in filepointer.readlines():\n splitted = line.split(' ')\n invnames[splitted[0][:-1]] = splitted[1][:-1]\n return invnames\n", "step-5": "\"\"\"\nCreated on Feb 10, 2013\n\n@author: jens\n\nDeprecated module for 
crystallogrphy related geometry operations. And a lot\nof other stuff that I put here.\n\"\"\"\n\nimport numpy as np\n\n\natomtable = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8,\n 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15,\n 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22,\n 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29,\n 'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36}\n\ncovalence_radius = {'H': .37, 'He': .0, 'Li': 1.23, 'Be': .90, 'B': .80, 'C': .77,\n 'N': .74, 'O': .71, 'F': .72, 'Ne': 0., 'Na': 1.54, 'Mg': 1.36,\n 'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': .99, 'Ar': 0.,\n 'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22,\n 'Cr': 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16, 'Ni': 1.15,\n 'Cu': 1.17, 'Zn': 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.20,\n 'Se': 1.16, 'Br': 1.14, 'Kr': 0.,\n 'Rb': 2.18} # , 191, 162, 145, 134, 130, 127, 125, 125, 128, 134, 148, 144, 141, 140, 136, 133, 0, 235, 198, 169, 165, 165, 164, 164, 162, 185, 161, 159, 159, 157, 157, 156, 170, 156, 144, 134, 130, 128, 126, 127, 130, 134, 149, 148, 147, 146, 146, 145, 0, 0, 0, 188, 165, 161, 142, 130, 151, 182, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}\n\nelectro_negativ = {'H': 2.20, 'He': 5.50, 'Li': .97, 'Be': 1.47, 'B': 2.01, 'C': 2.50,\n 'N': 3.07, 'O': 3.50, 'F': 4.40, 'Ne': 4.80, 'Na': 1.01, 'Mg': 1.23,\n 'Al': 1.47, 'Si': 1.74, 'P': 2.06, 'S': 2.44, 'Cl': 2.83, 'Ar': 3.20,\n 'K': .91, 'Ca': 1.04, 'Sc': 1.20, 'Ti': 1.32, 'V': 1.45,\n 'Cr': 1.56, 'Mn': 1.60, 'Fe': 1.64, 'Co': 1.70, 'Ni': 1.75,\n 'Cu': 1.75, 'Zn': 1.66, 'Ga': 1.82, 'Ge': 2.02, 'As': 2.20,\n 'Se': 2.48, 'Br': 2.74, 'Kr': 2.90,\n 'Rb': .89} # , 99, 111, 122, 123, 130, 136, 142, 145, 130, 142, 146, 149, 172, 182, 201, 221, 240, 86, 97, 108, 108, 107, 107, 107, 107, 110, 111, 110, 110, 110, 111, 111, 106, 114, 123, 133, 140, 146, 152, 155, 142, 142, 144, 144, 155, 167 }\n\nproton_number = {'H': '001', 'He': '002', 'Li': '003', 'Be': '004', 'B': '005', 'C': '006', 'N': '007', 'O': '008',\n 'F': '009', 'Ne': '010', 'Na': '011', 'Mg': '012', 'Al': '013', 'Si': '014', 'P': '015',\n 'S': '016', 'Cl': '017', 'Ar': '018', 'K': '019', 'Ca': '020', 'Sc': '021', 'Ti': '022',\n 'V': '023', 'Cr': '024', 'Mn': '025', 'Fe': '026', 'Co': '027', 'Ni': '028', 'Cu': '029',\n 'Zn': '030', 'Ga': '031', 'Ge': '032', 'As': '033', 'Se': '034', 'Br': '035', 'Kr': '036'}\n\nnumber_proton = dict([[v, k] for k, v in proton_number.items()])\n\npriority = {'3': '5',\n '2': '4',\n '1.5': '3',\n '6': '2',\n '5': '1',\n '1': '0'}\n\n\ndef frac2cart(coords, matrix):\n coords = np.dot(matrix, coords).flatten().tolist()[0]\n return coords\n\n\ndef xd_element(name):\n \"\"\"\n Return the element of an atom as defined in it's label.\n \"\"\"\n try:\n name = name[:2]\n except:\n pass\n try:\n covalence_radius[name]\n except:\n name = name[0]\n return name\n\n\ndef Uiso(adp, mean='geometric'):\n try:\n adp = get_adp_as_matrix(adp)\n eigvals = np.linalg.eigvals(adp)\n if mean == 'geometric':\n return (abs(eigvals[0]) * abs(eigvals[1]) * abs(eigvals[2])) ** (1. 
/ 3.)\n elif mean == 'arithmetic':\n return sum(eigvals) / 3.\n else:\n print('crystgeom: Error: please specify mean as \\'geometric\\' or \\'arithmetic\\'')\n exit()\n except:\n return adp\n\n\ndef get_adp_as_matrix(adp):\n if adp is None:\n return None\n return np.matrix([[adp[0], adp[3], adp[4]],\n [adp[3], adp[1], adp[5]],\n [adp[4], adp[5], adp[2]]])\n\n\ndef get_compound_properties(path):\n \"\"\"\n Reads a *.FChk file and returns a list containing the charge of\n the compound, the number of electrons in the compound, the overall\n lengths of the dipole moment vector and the total HF energy.\n \"\"\"\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = [float(value) for value in line.split(' ') if '.' in value]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]\n\n\ndef center_molecule(atom_coords):\n center = get_geom_center(atom_coords)\n atom_coords = move_center_to_point(atom_coords, center)\n return atom_coords\n\n\ndef get_pair_list(atom_elements_1, atom_coords_1,\n atom_elements_2, atom_coords_2):\n pair_list = []\n for i in xrange(len(atom_coords_1)):\n best_hit = (9, None)\n for j in xrange(len(atom_coords_2)):\n dist = np.linalg.norm(atom_coords_1[i] - atom_coords_2[j])\n if dist < best_hit[0] and atom_elements_1[i] == atom_elements_2[j]:\n best_hit = (dist, j)\n pair_list.append(best_hit[1])\n # ===========================================================================\n # print\n # for i in xrange(len(pair_list)):\n # print atom_atoms_1[i],atom_atoms_2[pair_list[i]]\n #===========================================================================\n return pair_list\n\n\ndef bond_order(bondxi,\n threshold_single_meso=0.0847,\n # ================================================================\n # threshold_meso_double=0.184,\n #================================================================\n threshold_meso_double=0.0847,\n threshold_double_triple=0.27):\n \"\"\"\n Returns the bond order between two atoms.\n \"\"\"\n if bondxi < threshold_single_meso:\n order = '1'\n elif bondxi < threshold_meso_double:\n order = '1.5'\n elif bondxi < threshold_double_triple:\n order = '2'\n else:\n order = '3'\n return order\n\n\n# ===============================================================================\n# def rotate_3D_symmetric(atom,source_atom):\n# '''\n# Rotates the ADP of 'atom' to match the orientation\n# of 'source_atom.\n# '''\n# cosangle=np.dot(atom.orientation[0],source_atom.orientation[0])\n# angle=np.arccos(cosangle)\n# axis=np.cross(atom.orientation[0],source_atom.orientation[0])\n# axis=axis/np.linalg.norm(axis)\n# matrix=get_3drotation_matrix(axis,angle)\n# orientation0_new=np.dot(source_atom.orientation[0],matrix)\n# if np.linalg.norm(orientation0_new-atom.orientation[0])<0.00001:\n# pass\n# else:\n# angle=angle*-1\n# matrix=get_3drotation_matrix(axis,angle)\n#\n# atom.adp['cart_int']=rotate_adp(source_atom.adp['cart_int'],matrix)\n#===============================================================================\n\n\n\n\ndef rotate_3D(atom, 
source_atom):\n \"\"\"\n Rotates the ADP of 'atom' to match the orientation\n of 'source_atom.\n \"\"\"\n from lauescript.cryst.match import get_transform\n\n lst2 = [np.array([0, 0, 0]), source_atom.orientation[0], source_atom.orientation[1]]\n lst1 = [np.array([0, 0, 0]), atom.orientation[0], atom.orientation[1]]\n\n matrix = get_transform(lst1, lst2, matrix=True)\n\n adp = source_atom.adp['cart_int']\n\n atom.adp['cart_int'] = rotate_adp(adp, matrix)\n\n\ndef xi(element1, element2, distance):\n \"\"\"\n Calculates the bond distinguishing parameter Xi.\n \"\"\"\n return (float(covalence_radius[element1]) + float(covalence_radius[element2]) -\n (0.08 * float(abs(electro_negativ[element1] - electro_negativ[element2]))) - distance)\n\n\ndef get_orientation_vector(atom1, atom2):\n v = atom1.cart - atom2.cart\n return v / np.linalg.norm(v)\n\n\ndef framework_crawler(atom, direction, rigid_group_old=None):\n \"\"\"\n Function to identify atoms belonging to a previosly defined rigid\n group.\n Arguments:\n atom: the name of the first atom of the rigid group.\n direction: the name of the second atom of the rigid group.\n rigid_group_old: used by the function itself for consecutive calls.\n\n Returns a list of atom names belonging to the rigid group.\n \"\"\"\n if not rigid_group_old:\n rigid_group = [atom, direction]\n else:\n rigid_group = rigid_group_old\n for atom in get_framework_neighbours(direction):\n if not atom in rigid_group and not atom.element == 'H':\n rigid_group.append(atom)\n framework_crawler(rigid_group[0], atom, rigid_group)\n if not rigid_group_old:\n #=======================================================================\n # print ' Determined rigid group:', [i.name for i in rigid_group]\n #=======================================================================\n return rigid_group\n\n\ndef get_closest_atom_of_element(element, atom, exclude=None):\n \"\"\"\n Returns the atom with the shortest distance to the given atom.\n \"\"\"\n for atom2 in atom.partner:\n if (element == atom2.element or not element) and not atom2 == exclude:\n return atom2\n\n\ndef get_atom_with_longest_bond(element, atom):\n hit = None\n for atom2 in atom.partner:\n if element in atom2.name:\n if np.linalg.norm(atom.cart - atom2.cart) < 1.8:\n hit = atom2\n else:\n break\n return hit\n\n\ndef get_framework_neighbours(atom, useH=True):\n \"\"\"\n Needs a ATOM.atom instance as argument.\n Returns the names of the framework atoms bound to that atom.\n \"\"\"\n neighbourlist = []\n for atom2 in atom.partner[:5]:\n #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n if np.linalg.norm(atom.cart - atom2.cart) <= float(covalence_radius[atom.element]) + float(\n covalence_radius[atom2.element]) + .1:\n if not 'H' == atom2.element or useH:\n neighbourlist.append(atom2)\n return neighbourlist\n\n\n#===============================================================================\n# def get_framework_neighbours(atom,useH=True):\n# \"\"\"\n# Needs a classes.atom instance as argument.\n# Returns the names of the framework atoms bound to that atom.\n# \"\"\"\n# neighbourlist=[]\n# for atom2 in atom.partner[atom.molecule.name][1:5]:\n# #if not 'H(' in atom2.name and np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n# if np.linalg.norm(atom.cart-atom2.cart)<=1.6:\n# if not 'H(' in atom2.name or useH:\n# neighbourlist.append(atom2)\n# return neighbourlist\n#===============================================================================\n\ndef read_meas_adp(data, path='xd.res', use='meas'):\n \"\"\"\n 
Reads the measured ADP from the xd.res file.\n The parameters are stored in atom.adp['frac_meas'] and\n atom.adp['cart_meas']\n \"\"\"\n use2 = 'frac_' + use\n switch = False\n filepointer = open(path, 'r')\n atomname = None\n for line in filepointer:\n if switch:\n split = [i for i in line.split(' ') if len(i) > 0]\n if not len(split) == 6:\n print('WARNING!!! Inconsistend number of floats while\\\n reading measured ADP.')\n data['exp'][atomname].adp[use2] = split\n switch = False\n if '(' in line:\n split = [i for i in line.split(' ') if len(i) > 0]\n if split[0][-1] == ')':\n switch = True\n atomname = split[0]\n use = 'cart_' + use\n for atom in data['exp'].atoms:\n # if use == 'cart_neut': print(atom)\n atom.adp[use] = rotate_adp2(atom.adp[use2],\n atom.molecule.frac2cartmatrix,\n atom.molecule.cell)\n return data\n\n\ndef reflect_adp(adp, planev):\n \"\"\"\n Returns the ADP after reflection on the plane defined by its normal\n vector 'planev'.\n \"\"\"\n M = np.identity(4)\n M[:3, :3] -= 2.0 * np.outer(planev, planev)\n M[:3, 3] = (2.0 * np.dot(np.array([0, 0, 0]), planev)) * planev\n\n return rotate_adp(adp, M[:3, :3])\n\n\ndef eigenv2tensor(axis):\n \"\"\"\n Calculates the tensor representation of ADP from its priciple axis.\n \"\"\"\n vec = np.ones((3, 3))\n vecval = np.ones((3, 3))\n for i in xrange(len(axis)):\n vmag = np.linalg.norm(axis[i])\n v = axis[i] / vmag\n #print v\n vec[:, i] = v\n vecval[:, i] = axis[i]\n adp = np.linalg.solve(vec, vecval)\n return adp\n\n\ndef get_adp_from_calc(vx, vy, vz):\n \"\"\"\n Calculates an ADP in its matrix representation from the three\n principle axis representing the displacement ellipsoid.\n\n The three principle axis of the ellipsoid are needed as arguments.\n A Matrix representation of the ADP is returned.\n \"\"\"\n ## lx=np.linalg.norm(vx)\n ## ly=np.linalg.norm(vy)\n ## lz=np.linalg.norm(vz)\n lx = vx\n ly = vy\n lz = vz\n L = np.matrix([[lx, 0, 0],\n [0, ly, 0],\n [0, 0, lz]])\n\n\n ## Vx=vx/lx\n ## Vy=vy/ly\n ## Vz=vz/lz\n Vx = np.array([1, 0, 0])\n Vy = np.array([0, 1, 0])\n Vz = np.array([0, 0, 1])\n V = np.matrix([[Vx[0], Vy[0], Vz[0]],\n [Vx[1], Vy[1], Vz[1]],\n [Vx[2], Vy[2], Vz[2]]])\n Vinv = np.linalg.inv(V)\n #print V,Vinv\n M = np.dot(np.dot(Vinv, L), V)\n #print M\n return M\n\n\n#===============================================================================\n#\n#\n# def get_general_distances(coordlist1,coordlist2,atomlist1,atomlist2):\n# \"\"\"\n# Calculates a distance dictionary between two sets of atoms.\n# Returns a dictionary entry for every atom in atomlist1 with the inter atom\n# distances and the corresponding atom name keyed to their atom type.\n#\n# This function is used by the get_best_point() function.\n# \"\"\"\n# maindict={}\n# for i in xrange(len(atomlist1)):\n# distdict={}\n# for j in xrange(len(atomlist2)):\n# if not atomlist2[j][0] in distdict.keys():\n# distdict[atomlist2[j][0]]=[[np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]]]\n# else:\n# distdict[atomlist2[j][0]].append([np.linalg.norm(coordlist1[i]-coordlist2[j]),atomlist2[j]])\n# ## print atomlist1[i],'aaaaaaaaaaa'\n# maindict[atomlist1[i]]=distdict\n# return maindict\n#===============================================================================\n\n\n\ndef get_best_quaternion(coordlist1, coordlist2):\n \"\"\"\n Determines the the quaternion representing the best possible\n transformation of two coordinate systems into each other using\n a least sqare approach.\n\n This function is used by the get_refined_rotation() function.\n 
\"\"\"\n M = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n\n if len(coordlist1) <= len(coordlist2):\n number = len(coordlist1)\n else:\n number = len(coordlist2)\n for i in xrange(number):\n aaa = np.matrix(np.outer(coordlist1[i], coordlist2[i]))\n M = M + aaa\n\n N11 = float(M[0][:, 0] + M[1][:, 1] + M[2][:, 2])\n N22 = float(M[0][:, 0] - M[1][:, 1] - M[2][:, 2])\n N33 = float(-M[0][:, 0] + M[1][:, 1] - M[2][:, 2])\n N44 = float(-M[0][:, 0] - M[1][:, 1] + M[2][:, 2])\n N12 = float(M[1][:, 2] - M[2][:, 1])\n N13 = float(M[2][:, 0] - M[0][:, 2])\n N14 = float(M[0][:, 1] - M[1][:, 0])\n N21 = float(N12)\n N23 = float(M[0][:, 1] + M[1][:, 0])\n N24 = float(M[2][:, 0] + M[0][:, 2])\n N31 = float(N13)\n N32 = float(N23)\n N34 = float(M[1][:, 2] + M[2][:, 1])\n N41 = float(N14)\n N42 = float(N24)\n N43 = float(N34)\n\n N = np.matrix([[N11, N12, N13, N14],\n [N21, N22, N23, N24],\n [N31, N32, N33, N34],\n [N41, N42, N43, N44]])\n\n values, vectors = np.linalg.eig(N)\n w = list(values)\n quat = vectors[:, w.index(max(w))]\n quat = np.array(quat).reshape(-1, ).tolist()\n return quat, max(w)\n\n\ndef get_rotation_matrix_from_quaternion(q):\n \"\"\"\n Returns the rotation matrix equivalent of the given quaternion.\n\n This function is used by the get_refined_rotation() function.\n \"\"\"\n R = np.matrix([[q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3],\n 2 * (q[1] * q[2] - q[0] * q[3]),\n 2 * (q[1] * q[3] + q[0] * q[2])],\n [2 * (q[2] * q[1] + q[0] * q[3]),\n q[0] * q[0] - q[1] * q[1] + q[2] * q[2] - q[3] * q[3],\n 2 * (q[2] * q[3] - q[0] * q[1])],\n [2 * (q[3] * q[1] - q[0] * q[2]),\n 2 * (q[3] * q[2] + q[0] * q[1]),\n q[0] * q[0] - q[1] * q[1] - q[2] * q[2] + q[3] * q[3]]])\n return R\n\n\ndef get_geom_center(coordlist):\n \"\"\"\n Calculates the geometrical center of a set of points.\n \"\"\"\n return sum(coordlist) / len(coordlist)\n\n\ndef move_center_to_point(atomlist, point):\n \"\"\"\n Moves the geometrical center of the atoms in atomlist to the given point.\n \"\"\"\n for atom in range(len(atomlist)):\n atomlist[atom] = atomlist[atom] - point\n return atomlist\n\n\ndef rotate_adp_reverse(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp(adp, rotmat):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmatT = np.transpose(rotmat)\n adp = np.dot(rotmatT, adp)\n adp = np.dot(adp, rotmat)\n # print '=\\n',adp,'\\n-------------------------------------------------\\n\\n\\n\\n\\n\\n'\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp2(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmat = np.linalg.inv(rotmat)\n rotmatT = np.transpose(rotmat)\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 
0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n\n adp = np.dot(rotmat, adp)\n adp = np.dot(adp, rotmatT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_adp3(adp, rotmat, cell):\n \"\"\"\n Rotates the adp with its corresponding rotation matrix.\n \"\"\"\n adp = np.matrix([[float(adp[0]), float(adp[3]), float(adp[4])],\n [float(adp[3]), float(adp[1]), float(adp[5])],\n [float(adp[4]), float(adp[5]), float(adp[2])]])\n rotmati = np.matrix(rotmat)\n rotmatiT = np.transpose(rotmati)\n rotmat = np.linalg.inv(rotmat)\n\n Nmat = np.matrix([[1 / cell[0], 0, 0],\n [0, 1 / cell[1], 0],\n [0, 0, 1 / cell[2]]])\n Nmat = np.linalg.inv(Nmat)\n NmatT = np.transpose(Nmat)\n adp = np.dot(rotmati, adp)\n adp = np.dot(adp, rotmatiT)\n\n adp = np.dot(Nmat, adp)\n adp = np.dot(adp, NmatT)\n\n adp = np.array(adp).flatten().tolist()\n return [adp[0], adp[4], adp[8], adp[1], adp[2], adp[5]]\n\n\ndef rotate_list_by(coordlist, R):\n \"\"\"\n Returns a list of coordinates where every position is rotated by\n the the rotation matrix 'R'.\n \"\"\"\n for coord in xrange(len(coordlist)):\n value = np.dot(R, coordlist[coord])\n value = np.array(value).reshape(-1, ).tolist()\n coordlist[coord] = value\n return coordlist\n\n\ndef write_xyz(coords, name):\n filepointer = open(name, 'w')\n filepointer.write(str(len(coords)))\n filepointer.write('\\n' + name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(str(coord) + ' ')\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef write_xyzqt(coords, name):\n filepointer = open(name, 'a')\n filepointer.write(name + '\\n')\n for line in coords:\n filepointer.write('C ')\n for coord in line:\n filepointer.write(' ' + str(coord))\n filepointer.write('\\n')\n filepointer.close()\n\n\ndef get_3drotation_matrix(axis, angle):\n \"\"\"\n Returns the rotation matrix that rotates a vector around the given axis\n by the given angle using the \"Euler-Rodrigues formula\".\n \"\"\"\n angle = angle #*-1\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],\n [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],\n [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])\n return rotmat\n\n\ndef get_normal_vector_of_plane(p1, p2, p3):\n \"\"\"\n Returns the normal vector of a plane defined by the points p1,p2 and p3.\n \"\"\"\n v12 = np.array(p1) - np.array(p2)\n v13 = np.array(p1) - np.array(p3)\n nvec = np.cross(v12, v13)\n ## print 'norm: '+str(np.linalg.norm(nvec))\n return nvec / np.linalg.norm(nvec)\n\n\ndef read_gaussian_coords():\n atomlist = []\n filepointer = open('g98.out', 'r')\n for line in filepointer.readlines():\n if 'Distance' in line: break\n try:\n newline = [float(i) for i in line.split(' ') if len(i) > 0]\n newline = [newline[:2], np.array(newline[3:])]\n atomlist.append(newline)\n except:\n pass\n return atomlist\n\n\ndef get_closest_neighbours(atomlist, neighbours=2):\n \"\"\"\n Returns a list where every element is a list of three atomnames. 
The second and third\n names are the closest neighbours of the first names.\n The argument is a list as returned by frac_to_cart and the number of neighbours to be\n returned.\n \"\"\"\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist\n\n\ndef calculate_distance_matrix(atomlist):\n \"\"\"\n Calculates for every atom the distances to all other atoms\n in atomlist.\n Returns a list where every element is a list of all distances.\n \"\"\"\n distlist = []\n for atom in atomlist:\n atomdict = {}\n for partner in atomlist:\n if not str(int(partner[0][1])) in atomdict.keys():\n atomdict[str(int(partner[0][1]))] = []\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n else:\n atomdict[str(int(partner[0][1]))].append(np.linalg.norm(atom[1] - partner[1]))\n atomdict[str(int(partner[0][1]))].sort()\n\n distlist.append(atomdict)\n\n return distlist\n\n\ndef link_atoms_by_distance(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equal atoms of one molecule in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n partnervalue += abs(atom[key][element] - partner[key][element])\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\ndef make_list_unique(seq, idfun=None):\n if idfun is None:\n def idfun(x): return x\n seen = {}\n result = []\n for item in seq:\n marker = idfun(item)\n\n if marker in seen: continue\n seen[marker] = 1\n result.append(item)\n return result\n\n\ndef get_influence_atoms(atomlist):\n \"\"\"\n Determines the atoms defining the chemical enviroment of a given atom by checking\n their bonding partners. 
Only the first and second neighbours are considered.\n \"\"\"\n enviromentlist = []\n trunclist = []\n neighbourlist = get_closest_neighbours(atomlist, 4)\n for neighbours in neighbourlist:\n if neighbours[0][0] == \"H\":\n neighbours = neighbours[:2]\n if neighbours[0][0] == \"O\":\n neighbours = neighbours[:3]\n trunclist.append(neighbours)\n for atom in trunclist:\n newatom = []\n for atom1partner in atom[1:]:\n for partner in trunclist:\n if partner[0] == atom1partner:\n counter = 0\n\n for atomi in partner:\n if atomi[0] == 'H':\n counter += 1\n\n if counter < 2 or (partner[0] in atom and atom[0][0] == 'H'):\n newatom += atom + partner[1:]\n\n newatom = make_list_unique(newatom)\n newatom.sort()\n enviromentlist.append(newatom)\n return enviromentlist\n\n\ndef link_atoms_by_distance_diff(distlist1, atomlist1, distlist2, atomlist2, keys):\n \"\"\"\n The function is able to identify equivalent atoms in different molecules in different\n coordinate systems independent of the molecule's orientaion.\n \"\"\"\n hitlist = []\n\n for atom in distlist1:\n atomtype = int(atomlist1[distlist1.index(atom)][0][1])\n valuelist = []\n for partner in distlist2:\n partnertype = int(atomlist2[distlist2.index(partner)][0][1])\n if atomtype == partnertype:\n partnervalue = 0\n keylist = partner.keys()\n for key in keylist:\n for element in xrange(len(atom[key])):\n value = abs(atom[key][element] - partner[key][element])\n partnervalue += value\n else:\n partnervalue = 9999999\n valuelist.append(partnervalue)\n minvalue = min(valuelist)\n besthit = valuelist.index(minvalue)\n hitlist.append(besthit)\n\n\ndef read_multiple_coordinates(fragmentnames):\n \"\"\"\n Calls read_coordinates and frac_to_cart for every path=name in fragmentnames and returns a\n dictionary where every returnvalue of frac_to_cart is keyed to its fragment name.\n \"\"\"\n fragdict = {}\n for name in fragmentnames:\n path = name + '/'\n cell, pos = read_coordinates(path)\n atomlist = frac_to_cart(cell, pos)\n atomdict = {}\n for atom in atomlist:\n atomdict[atom[0][0]] = atom[1]\n fragdict[name] = atomlist\n return fragdict\n\n\n##def read_coordinates(path=''):\n## \"\"\"\n## Reads the cell parameters from a 'xd.mas' file and the atomic positions\n## from a 'xd.res' file.\n## The function returns a list with the cell parameters and an dictionary which\n## keys the atom name to its fractional coordinates.\n## \"\"\"\n## maspointer=open(path+'xd.mas','r')\n## respointer=open(path+'xd.res','r')\n## positions={}\n## keylist=[] #Needed to keep the atomlist order. This is important for the frequency read function.\n## for line in maspointer.readlines():\n## if 'CELL' in line:\n## cell=[float(i) for i in line.split(\" \") if '.' in i]\n## for line in respointer.readlines():\n## if '(' in line and not '!' in line:\n## coords=[float(i) for i in line.split(\" \") if '.' in i]\n## coords=coords[:-1]\n## key=line.split(\" \")[0]\n## keylist.append(key)\n## positions[key]=coords\n## sortkeylist=[]\n## for i in xrange(len(keylist)):\n## j=i+1\n## for key in keylist:\n## if j==int(key[2:-1]):\n## sortkeylist.append(key)\n## return cell,positions,sortkeylist\n\ndef read_xd_master_file(path, errorpointer):\n \"\"\"\n Returns the compound name and the cell parameters from a xd.mas style\n file specified by 'path'.\n \"\"\"\n filepointer = open(path, 'r')\n for line in filepointer.readlines():\n if 'TITLE' in line:\n compound_name = line.partition('!')[2].lstrip().rstrip()\n if 'CELL' in line:\n cell = [float(i) for i in line.split(\" \") if '.' 
in i]\n break\n filepointer.close()\n try:\n return compound_name, cell\n except:\n errorpointer.write(path + '\\n')\n return None, None\n\n\ndef read_xd_parameter_file(path, sort=False):\n respointer = open(path, 'r')\n positions = {}\n keylist = []\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return positions, sortkeylist\n\n\ndef read_coordinates(path='', sort=True):\n \"\"\"\n Reads the cell parameters from a 'xd.mas' file and the atomic positions\n from a 'xd.res' file.\n The function returns a list with the cell parameters and an dictionary which\n keys the atom name to its fractional coordinates.\n \"\"\"\n maspointer = open(path + 'xd.mas', 'r')\n respointer = open(path + 'xd.res', 'r')\n\n positions = {}\n keylist = [] #Needed to keep the atomlist order. This is important for the frequency read function.\n for line in maspointer.readlines():\n if 'CELL ' in line:\n cell = [float(i) for i in line.split(\" \") if '.' in i]\n break\n for line in respointer.readlines():\n if '(' in line and not '!' in line:\n coords = [float(i) for i in line.split(\" \") if '.' in i]\n coords = coords[:-1]\n key = line.split(\" \")[0]\n keylist.append(key)\n positions[key] = coords\n if sort:\n sortkeylist = []\n for i in xrange(len(keylist)):\n j = i + 1\n for key in keylist:\n number = get_number(key)\n if j == int(number):\n sortkeylist.append(key)\n else:\n sortkeylist = keylist\n return cell, positions, sortkeylist\n\n\ndef get_number(atomname):\n \"\"\"\n Returns the number in the brackets of an atomname.\n \"\"\"\n switch = False\n number = ''\n for char in atomname:\n if char == ')':\n switch = False\n if switch:\n number += char\n if char == '(':\n switch = True\n return number\n\n\ndef frac_to_cart(cell, positions):\n \"\"\"\n Transforms a set of given fractional coordinates to cartesian coordinates.\n Needs a list containing the cell parameters as its first argument and the dictionary\n returned by read coordinates().\n Returns a dictionary with cartesian coordinates analog to fractional dictionary.\n \"\"\"\n atomlist = []\n counter = 1\n a, b, c = cell[0], cell[1], cell[2]\n alpha, beta, gamma = cell[3] / 180 * np.pi, cell[4] / 180 * np.pi, cell[5] / 180 * np.pi\n v = np.sqrt(1 - np.cos(alpha) * np.cos(alpha) - np.cos(beta) * np.cos(beta) - np.cos(gamma) * np.cos(gamma) \\\n + 2 * np.cos(alpha) * np.cos(beta) * np.cos(gamma))\n transmatrix = np.matrix([[a, b * np.cos(gamma), c * np.cos(beta)],\n [0, b * np.sin(gamma), c * (np.cos(alpha) - np.cos(beta) * np.cos(gamma)) / np.sin(gamma)],\n [0, 0, c * v / np.sin(gamma)]])\n\n for atom in positions:\n coordmatrix = np.dot(transmatrix, positions[str(atom)])\n coordmatrix = np.array(coordmatrix).flatten().tolist()\n atomlist.append([])\n atomlist[-1].append([atom, atomtable[atom[0]]])\n counter += 1\n atomlist[-1].append(np.array(coordmatrix))\n return atomlist\n\n\ndef list_to_dict(atomlist, full=False):\n \"\"\"\n Keys the coordinates of the atoms read from xd.res to the numerical part of its name.\n \"\"\"\n atomdict = {}\n if full:\n for atom in atomlist:\n atomdict[atom[0]] = atom[1]\n else:\n for atom in atomlist:\n atomdict[atom[0][0]] = 
atom[1]\n return atomdict\n\n\n#===============================================================================\n# def link_atoms(gatomlist,xatomdict):\n# \"\"\"\n# Returns a list of pairs of equivalten atoms.\n# \"\"\"\n# linklist=[]\n# keylist=xatomdict.keys()\n# for atom in xrange(len(gatomlist)):\n# for key in keylist:\n# if int(key)==atom+1:\n# linklistline=[atomlist[atom][1],xatomdict[key]]\n# linklist.append(linklistline)\n# break\n# return linklist\n#===============================================================================\n\n#===============================================================================\n# def get_random_plane(linklist):\n# \"\"\"\n# Randomly picks three atoms to build a plane from.\n# \"\"\"\n# planepoints=random.sample(linklist,3)\n# gplanenorm=get_normal_vector_of_plane(planepoints[0][0],planepoints[1][0],planepoints[2][0])\n# gplanedir=np.linalg.norm(planepoints[0][0]-planepoints[1][0])\n# xplanenorm=get_normal_vector_of_plane(planepoints[0][1],planepoints[1][1],planepoints[2][1])\n# xdplanedir=np.linalg.norm(planepoints[0][1]-planepoints[1][1])\n# return gplanenorm,xplanenorm\n#===============================================================================\n\ndef get_angle(v1, v2):\n \"\"\"\n Returns the angle between two vectors.\n \"\"\"\n return np.arccos(np.dot(v1, v2))\n\n\ndef read_invout_database(path):\n path += 'Invariome.out'\n filepointer = open(path, 'r')\n invnames = {}\n for line in filepointer.readlines():\n splitted = line.split(' ')\n invnames[splitted[0][:-1]] = splitted[1][:-1]\n return invnames\n\n\n", "step-ids": [ 23, 33, 40, 50, 51 ] }
[ 23, 33, 40, 50, 51 ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('auth', '0001_initial'), ('c4c_app', '0006_c4cjob_complete'), ] operations = [ migrations.AlterModelOptions( name='c4cbranch', options={'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'}, ), migrations.AlterModelOptions( name='c4cdonation', options={'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'}, ), migrations.AlterModelOptions( name='c4cevent', options={'verbose_name': 'Event', 'verbose_name_plural': 'Events'}, ), migrations.AlterModelOptions( name='c4cjob', options={'verbose_name': 'Job', 'verbose_name_plural': 'Jobs'}, ), migrations.AlterModelOptions( name='c4cuser', options={'verbose_name': 'C4C User', 'verbose_name_plural': 'C4C Users'}, ), migrations.RemoveField( model_name='c4cbranch', name='officers', ), migrations.AddField( model_name='c4cbranch', name='group', field=models.OneToOneField(related_name='in_branches', default=None, to='auth.Group'), preserve_default=False, ), migrations.AddField( model_name='c4cbranch', name='officers_group', field=models.OneToOneField(related_name='is_branch_officer_of', default=None, to='auth.Group'), preserve_default=False, ), migrations.AddField( model_name='c4cjob', name='offer', field=models.BooleanField(default=False), preserve_default=True, ), migrations.AlterField( model_name='c4cjob', name='duration', field=models.IntegerField(null=True), preserve_default=True, ), ]
normal
{ "blob_id": "30986eb0a6cd82f837dd14fb383529a6a41def9a", "index": 8338, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('auth', '0001_initial'), ('c4c_app',\n '0006_c4cjob_complete')]\n operations = [migrations.AlterModelOptions(name='c4cbranch', options={\n 'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'}),\n migrations.AlterModelOptions(name='c4cdonation', options={\n 'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'}),\n migrations.AlterModelOptions(name='c4cevent', options={\n 'verbose_name': 'Event', 'verbose_name_plural': 'Events'}),\n migrations.AlterModelOptions(name='c4cjob', options={'verbose_name':\n 'Job', 'verbose_name_plural': 'Jobs'}), migrations.\n AlterModelOptions(name='c4cuser', options={'verbose_name':\n 'C4C User', 'verbose_name_plural': 'C4C Users'}), migrations.\n RemoveField(model_name='c4cbranch', name='officers'), migrations.\n AddField(model_name='c4cbranch', name='group', field=models.\n OneToOneField(related_name='in_branches', default=None, to=\n 'auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cbranch', name='officers_group', field=models.\n OneToOneField(related_name='is_branch_officer_of', default=None, to\n ='auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cjob', name='offer', field=models.BooleanField(\n default=False), preserve_default=True), migrations.AlterField(\n model_name='c4cjob', name='duration', field=models.IntegerField(\n null=True), preserve_default=True)]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('auth', '0001_initial'), ('c4c_app',\n '0006_c4cjob_complete')]\n operations = [migrations.AlterModelOptions(name='c4cbranch', options={\n 'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'}),\n migrations.AlterModelOptions(name='c4cdonation', options={\n 'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'}),\n migrations.AlterModelOptions(name='c4cevent', options={\n 'verbose_name': 'Event', 'verbose_name_plural': 'Events'}),\n migrations.AlterModelOptions(name='c4cjob', options={'verbose_name':\n 'Job', 'verbose_name_plural': 'Jobs'}), migrations.\n AlterModelOptions(name='c4cuser', options={'verbose_name':\n 'C4C User', 'verbose_name_plural': 'C4C Users'}), migrations.\n RemoveField(model_name='c4cbranch', name='officers'), migrations.\n AddField(model_name='c4cbranch', name='group', field=models.\n OneToOneField(related_name='in_branches', default=None, to=\n 'auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cbranch', name='officers_group', field=models.\n OneToOneField(related_name='is_branch_officer_of', default=None, to\n ='auth.Group'), preserve_default=False), migrations.AddField(\n model_name='c4cjob', name='offer', field=models.BooleanField(\n default=False), preserve_default=True), migrations.AlterField(\n model_name='c4cjob', name='duration', field=models.IntegerField(\n null=True), preserve_default=True)]\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0001_initial'),\n ('c4c_app', '0006_c4cjob_complete'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n 
name='c4cbranch',\n options={'verbose_name': 'Branch', 'verbose_name_plural': 'Branches'},\n ),\n migrations.AlterModelOptions(\n name='c4cdonation',\n options={'verbose_name': 'Donation', 'verbose_name_plural': 'Donations'},\n ),\n migrations.AlterModelOptions(\n name='c4cevent',\n options={'verbose_name': 'Event', 'verbose_name_plural': 'Events'},\n ),\n migrations.AlterModelOptions(\n name='c4cjob',\n options={'verbose_name': 'Job', 'verbose_name_plural': 'Jobs'},\n ),\n migrations.AlterModelOptions(\n name='c4cuser',\n options={'verbose_name': 'C4C User', 'verbose_name_plural': 'C4C Users'},\n ),\n migrations.RemoveField(\n model_name='c4cbranch',\n name='officers',\n ),\n migrations.AddField(\n model_name='c4cbranch',\n name='group',\n field=models.OneToOneField(related_name='in_branches', default=None, to='auth.Group'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='c4cbranch',\n name='officers_group',\n field=models.OneToOneField(related_name='is_branch_officer_of', default=None, to='auth.Group'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='c4cjob',\n name='offer',\n field=models.BooleanField(default=False),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='c4cjob',\n name='duration',\n field=models.IntegerField(null=True),\n preserve_default=True,\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.classify import NaiveBayesClassifier
from nltk.probability import FreqDist
import csv

f = open('trolls.csv', 'r')
file = csv.reader(f)
sentences = []
remarks = []
psObject = PorterStemmer()

illegal_chars = ['.', ',', '@', "'", '+', '-', '*']
paragraph = ''

# First pass over the CSV: concatenate every comment so the 100 most
# frequent words can be used as the feature vocabulary.
for kk in file:
    paragraph += kk[0]
f.close()

f = open('trolls.csv', 'r')
file = csv.reader(f)
all_words = word_tokenize(paragraph)
all2 = FreqDist(all_words)
most_common_words = list(all2.most_common(100))
print('most commons below...')
print(most_common_words)
most_cm_1 = [word for word, freq in most_common_words]

stopWords = stopwords.words('english')

# Second pass: strip punctuation and stop words, then build one feature
# dict per comment (word -> whether it is among the 100 most common words).
for i in file:
    filtered = i[0]
    for ch in illegal_chars:
        filtered = filtered.replace(ch, '')
    filtered_from_stopWords = ''
    for x in filtered.split(' '):
        if x not in stopWords:
            filtered_from_stopWords += x + ' '
    words = filtered_from_stopWords.split(' ')
    features = {w.lower(): (w in most_cm_1) for w in words}
    sentences.append([features, i[1]])
    remarks.append(i[1])
f.close()

print(remarks)
print(sentences)
classifier = NaiveBayesClassifier.train(sentences)

inputs = input('Enter a comment ')
words_entered = inputs.split(' ')
# Encode the new comment the same way as the training data so the
# classifier sees a consistent feature representation.
entry = {w.lower(): (w in most_cm_1) for w in words_entered}
print(classifier.classify(entry))
normal
{ "blob_id": "0dbdd7f7adffed850f126a2054c764b421c6ab84", "index": 6799, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor kk in file:\n paragraph += kk[0]\nf.close()\n<mask token>\nprint('most commons below...')\nprint(most_common_words)\n<mask token>\nfor i, j in most_common_words:\n most_cm_1.append(i)\n<mask token>\nfor i in file:\n filtered = ''\n filtered_from_stopWords = ''\n counter = 0\n for j in range(len(illegal_chars)):\n if counter == 0:\n counter += 1\n filtered = i[0].replace(illegal_chars[j], '')\n else:\n filtered = filtered.replace(illegal_chars[j], '')\n counter = 0\n filteredArr = filtered.split(' ')\n for x in filteredArr:\n if x not in stopWords:\n filtered_from_stopWords += x + ' '\n bb = []\n filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in\n filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\n<mask token>\nprint(remarks)\nprint(sentences)\n<mask token>\nprint(classifier.classify(entry))\n", "step-3": "<mask token>\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nsentences = []\nremarks = []\npsObject = PorterStemmer()\nillegal_chars = ['.', ',', '@', \"'\", '+', '-', '*']\nparagraph = ''\nfor kk in file:\n paragraph += kk[0]\nf.close()\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nall_words = word_tokenize(paragraph)\nall2 = FreqDist(all_words)\nmost_common_words = list(all2.most_common(100))\nprint('most commons below...')\nprint(most_common_words)\nmost_cm_1 = []\nfor i, j in most_common_words:\n most_cm_1.append(i)\nstopWords = stopwords.words('english')\nall_words = []\nfor i in file:\n filtered = ''\n filtered_from_stopWords = ''\n counter = 0\n for j in range(len(illegal_chars)):\n if counter == 0:\n counter += 1\n filtered = i[0].replace(illegal_chars[j], '')\n else:\n filtered = filtered.replace(illegal_chars[j], '')\n counter = 0\n filteredArr = filtered.split(' ')\n for x in filteredArr:\n if x not in stopWords:\n filtered_from_stopWords += x + ' '\n bb = []\n filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in\n filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\ncount = 0\nprint(remarks)\nprint(sentences)\nclassifier = NaiveBayesClassifier.train(sentences)\ninputs = input('Enter a comment ')\nwords_entered = inputs.split(' ')\nentry = {w: (True) for w in words_entered}\nprint(classifier.classify(entry))\n", "step-4": "from nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.probability import FreqDist\nimport csv\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nsentences = []\nremarks = []\npsObject = PorterStemmer()\nillegal_chars = ['.', ',', '@', \"'\", '+', '-', '*']\nparagraph = ''\nfor kk in file:\n paragraph += kk[0]\nf.close()\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nall_words = word_tokenize(paragraph)\nall2 = FreqDist(all_words)\nmost_common_words = list(all2.most_common(100))\nprint('most commons below...')\nprint(most_common_words)\nmost_cm_1 = []\nfor i, j in most_common_words:\n most_cm_1.append(i)\nstopWords = stopwords.words('english')\nall_words = []\nfor i in file:\n filtered = ''\n filtered_from_stopWords = ''\n counter = 0\n for j in range(len(illegal_chars)):\n if counter == 0:\n counter += 1\n filtered = 
i[0].replace(illegal_chars[j], '')\n else:\n filtered = filtered.replace(illegal_chars[j], '')\n counter = 0\n filteredArr = filtered.split(' ')\n for x in filteredArr:\n if x not in stopWords:\n filtered_from_stopWords += x + ' '\n bb = []\n filtered_from_stopWords_ARRAY = filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in\n filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\ncount = 0\nprint(remarks)\nprint(sentences)\nclassifier = NaiveBayesClassifier.train(sentences)\ninputs = input('Enter a comment ')\nwords_entered = inputs.split(' ')\nentry = {w: (True) for w in words_entered}\nprint(classifier.classify(entry))\n", "step-5": "from nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom nltk.classify import NaiveBayesClassifier\nfrom nltk.probability import FreqDist\nimport csv\n\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nsentences=[]\nremarks=[]\npsObject = PorterStemmer()\n\nillegal_chars = [\n '.',',','@',\"'\",'+','-','*',\n]\nparagraph=''\n\nfor kk in file :\n paragraph+=kk[0]\nf.close()\nf = open('trolls.csv', 'r')\nfile = csv.reader(f)\nall_words = word_tokenize(paragraph)\n# print(all_words)\nall2 = FreqDist(all_words)\nmost_common_words = list(all2.most_common(100))\nprint('most commons below...')\nprint(most_common_words)\nmost_cm_1=[]\nfor i,j in most_common_words:\n most_cm_1.append(i)\n# print(most_cm_1)\nstopWords = stopwords.words('english')\nall_words = []\nfor i in file :\n filtered=''\n filtered_from_stopWords=''\n counter = 0\n for j in range(len(illegal_chars)) :\n if counter == 0:\n counter+=1\n filtered = i[0].replace(illegal_chars[j], '')\n else :\n filtered=filtered.replace(illegal_chars[j],'')\n counter=0\n filteredArr = filtered.split(' ')\n for x in filteredArr :\n if x not in stopWords :\n filtered_from_stopWords+=x+' '\n bb=[]\n filtered_from_stopWords_ARRAY=filtered_from_stopWords.split(' ')\n features = {w.lower(): (w in most_cm_1) for w in filtered_from_stopWords_ARRAY}\n bb.append(features)\n bb.append(i[1])\n sentences.append(bb)\n remarks.append(i[1])\n\ncount =0\nprint(remarks)\nprint(sentences)\nclassifier = NaiveBayesClassifier.train(sentences)\ninputs = input('Enter a comment ')\nwords_entered=inputs.split(' ')\nentry = {w: ( True) for w in words_entered}\n\nprint(classifier.classify(entry))\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from flask import Flask, request, render_template, redirect import os import smtplib from email.message import EmailMessage app = Flask(__name__) EMAIL_ADDRESS = os.environ.get('EMAIL_USER') EMAIL_PASSWORD = os.environ.get('EMAIL_PASS') @app.route('/') def index(): return render_template('index.html') @app.route('/submit', methods=['POST']) def submit(): if request.method == 'POST': name = request.form['name'] email = request.form['email'] subject = request.form['subject'] message = request.form['message'] msg = EmailMessage() msg['From'] = email msg['To'] = EMAIL_ADDRESS msg['Subject'] = subject msg.set_content(message) with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp: smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD) smtp.send_message(msg) return render_template('success.html') return render_template('index.html') if __name__ == '__main__': app.run()
normal
{ "blob_id": "27d9e6a868cfc18780ec9615e8dbc3b5ea2fd0c3", "index": 1399, "step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n", "step-3": "<mask token>\napp = Flask(__name__)\nEMAIL_ADDRESS = os.environ.get('EMAIL_USER')\nEMAIL_PASSWORD = os.environ.get('EMAIL_PASS')\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n", "step-4": "from flask import Flask, request, render_template, redirect\nimport os\nimport smtplib\nfrom email.message import EmailMessage\napp = Flask(__name__)\nEMAIL_ADDRESS = os.environ.get('EMAIL_USER')\nEMAIL_PASSWORD = os.environ.get('EMAIL_PASS')\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/submit', methods=['POST'])\ndef submit():\n if request.method == 'POST':\n name = request.form['name']\n email = request.form['email']\n subject = request.form['subject']\n message = request.form['message']\n msg = EmailMessage()\n msg['From'] = email\n msg['To'] = EMAIL_ADDRESS\n msg['Subject'] = subject\n msg.set_content(message)\n with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return render_template('success.html')\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from machina.apps.forum_conversation.abstract_models import AbstractPost from machina.apps.forum_conversation.abstract_models import AbstractTopic from machina.core.db.models import model_factory from django.dispatch import receiver from django.db.models.signals import post_save from django.contrib.auth.models import User from django.db import models from django.utils.translation import ugettext_lazy as _ Topic = model_factory(AbstractTopic) class UserNotification(models.Model): user = models.ForeignKey(User, on_delete=models.CASCADE) notification_content = models.CharField(max_length=100) notification_link = models.CharField(max_length=100) created_at = models.DateTimeField(auto_now_add=True) class Post(AbstractPost): __original_flags = None __original_votes = None def __init__(self, *args, **kwargs): super(Post, self).__init__(*args, **kwargs) self.__original_flags = self.flag_count self.__original_votes = self.vote_count def save(self, force_insert=False, force_update=False, *args, **kwargs): super(Post, self).save(force_insert, force_update, *args, **kwargs) notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(self.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.topic.id, self.id, self.id) if self.__original_flags != self.flag_count: n = UserNotification(user=self.poster, notification_content="Flag updates on post {}".format(self.subject), notification_link=notification_link) n.save() if self.__original_votes != self.vote_count: n = UserNotification(user=self.poster, notification_content="Vote update on post {}".format(self.subject), notification_link=notification_link) n.save() self.__original_flags = self.flag_count self.__original_votes = self.vote_count class Userflags(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) flag_count = models.PositiveIntegerField( verbose_name=_('Flag count'), editable=False, blank=True, default=0) @receiver(post_save, sender=User) def create_userflags(sender, instance, created, **kwargs): if created: Userflags.objects.create(user=instance) @receiver(post_save, sender=User) def save_userflags(sender, instance, **kwargs): instance.userflags.save() @receiver(post_save, sender=Post) def make_notifications(sender, instance, created, **kwargs): user = instance.topic.poster notification_content = "You have a new notification" notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(instance.topic.forum.slug, instance.topic.forum.id, instance.topic.slug, instance.topic.id, instance.id, instance.id) if created: notification_content = "A new post was created on your topic {}".format(instance.topic.slug) else: notification_content = "A post's contetn was edited on your topic {}".format(instance.topic.slug) n = UserNotification(user=user, notification_link=notification_link, notification_content=notification_content) n.save()
normal
{ "blob_id": "1e81e0f3cb2fb25fdef08a913aa1ff77d0c2a562", "index": 9204, "step-1": "<mask token>\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n<mask token>\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n", "step-2": "<mask token>\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n 
notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n", "step-3": "<mask token>\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n 
user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n", "step-4": "from __future__ import unicode_literals\nfrom machina.apps.forum_conversation.abstract_models import AbstractPost\nfrom machina.apps.forum_conversation.abstract_models import AbstractTopic\nfrom machina.core.db.models import model_factory\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n 
.format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n", "step-5": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom machina.apps.forum_conversation.abstract_models import AbstractPost\nfrom machina.apps.forum_conversation.abstract_models import AbstractTopic\nfrom machina.core.db.models import model_factory\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n\n notification_link = \"/forum/{}-{}/topic/{}-{}/?post={}#{}\".format(self.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\"Flag updates on post {}\".format(self.subject), notification_link=notification_link)\n n.save()\n\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\"Vote update on post {}\".format(self.subject), notification_link=notification_link)\n n.save()\n\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n flag_count = models.PositiveIntegerField(\n verbose_name=_('Flag count'), editable=False, blank=True, default=0)\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance) \n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save() \n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = \"You have a new notification\"\n notification_link = \"/forum/{}-{}/topic/{}-{}/?post={}#{}\".format(instance.topic.forum.slug, instance.topic.forum.id, instance.topic.slug, instance.topic.id, instance.id, instance.id)\n\n if created:\n notification_content = \"A new post was created on your topic {}\".format(instance.topic.slug)\n else:\n notification_content = \"A post's contetn was edited on your topic {}\".format(instance.topic.slug)\n\n n = UserNotification(user=user, notification_link=notification_link, notification_content=notification_content)\n n.save()\n", "step-ids": [ 10, 11, 12, 13, 14 ] }
[ 10, 11, 12, 13, 14 ]
import numpy as np import time import uuid from datetime import datetime log_host = "agent1" class State: def __init__(self, path, iterations): self.path = path self.iterations = iterations def run(self): assert 0, "run not implemented" class BruteForceAttackState(State): def run(self): os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16', 'MacOS10']) addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13', '127.0.0.42']) for i in range(self.iterations): timestamp = datetime.now() log_id = uuid.uuid4() message = "Unsuccessful login attempt" os = os_val log_type = "Informational" host = log_host log_machine = addr_val log = str(timestamp)+"|"+str(log_id)+"|"+message+"|"+os+"|"+log_type+"|"+host+"|"+log_machine print(log) f = open(self.path, "a") f.write(log + "\n") f.close() time.sleep(0.2) class NoAlarmState(State): def run(self): for i in range(self.iterations): os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16', 'MacOS10']) addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13', '127.0.0.42']) timestamp = datetime.now() log_id = uuid.uuid4() message = "Unsuccessful login attempt" os = os_val log_type = "Informational" host = log_host log_machine = addr_val log = str(timestamp)+"|"+str(log_id)+"|"+message+"|"+os+"|"+log_type+"|"+host+"|"+log_machine print(log) f = open(self.path, "a") f.write(log + "\n") f.close() time.sleep(1.5)
normal
{ "blob_id": "cf3b4e2c76091f95d24e8a987a63ece46503d6e8", "index": 3459, "step-1": "<mask token>\n\n\nclass BruteForceAttackState(State):\n\n def run(self):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13',\n '127.0.0.42'])\n for i in range(self.iterations):\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(0.2)\n\n\nclass NoAlarmState(State):\n\n def run(self):\n for i in range(self.iterations):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7',\n '127.0.0.13', '127.0.0.42'])\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(1.5)\n", "step-2": "<mask token>\n\n\nclass State:\n\n def __init__(self, path, iterations):\n self.path = path\n self.iterations = iterations\n\n def run(self):\n assert 0, 'run not implemented'\n\n\nclass BruteForceAttackState(State):\n\n def run(self):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13',\n '127.0.0.42'])\n for i in range(self.iterations):\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(0.2)\n\n\nclass NoAlarmState(State):\n\n def run(self):\n for i in range(self.iterations):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7',\n '127.0.0.13', '127.0.0.42'])\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(1.5)\n", "step-3": "<mask token>\nlog_host = 'agent1'\n\n\nclass State:\n\n def __init__(self, path, iterations):\n self.path = path\n self.iterations = iterations\n\n def run(self):\n assert 0, 'run not implemented'\n\n\nclass BruteForceAttackState(State):\n\n def run(self):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13',\n '127.0.0.42'])\n for i in range(self.iterations):\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = 
addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(0.2)\n\n\nclass NoAlarmState(State):\n\n def run(self):\n for i in range(self.iterations):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7',\n '127.0.0.13', '127.0.0.42'])\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(1.5)\n", "step-4": "import numpy as np\nimport time\nimport uuid\nfrom datetime import datetime\nlog_host = 'agent1'\n\n\nclass State:\n\n def __init__(self, path, iterations):\n self.path = path\n self.iterations = iterations\n\n def run(self):\n assert 0, 'run not implemented'\n\n\nclass BruteForceAttackState(State):\n\n def run(self):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13',\n '127.0.0.42'])\n for i in range(self.iterations):\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(0.2)\n\n\nclass NoAlarmState(State):\n\n def run(self):\n for i in range(self.iterations):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16',\n 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7',\n '127.0.0.13', '127.0.0.42'])\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = 'Unsuccessful login attempt'\n os = os_val\n log_type = 'Informational'\n host = log_host\n log_machine = addr_val\n log = (str(timestamp) + '|' + str(log_id) + '|' + message + '|' +\n os + '|' + log_type + '|' + host + '|' + log_machine)\n print(log)\n f = open(self.path, 'a')\n f.write(log + '\\n')\n f.close()\n time.sleep(1.5)\n", "step-5": "import numpy as np\nimport time\nimport uuid\nfrom datetime import datetime\n\n\nlog_host = \"agent1\"\n\n\nclass State:\n def __init__(self, path, iterations):\n self.path = path\n self.iterations = iterations\n\n def run(self):\n assert 0, \"run not implemented\"\n\n\nclass BruteForceAttackState(State):\n def run(self):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16', 'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13', '127.0.0.42'])\n for i in range(self.iterations):\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = \"Unsuccessful login attempt\"\n os = os_val\n log_type = \"Informational\"\n host = log_host\n log_machine = addr_val\n\n log = str(timestamp)+\"|\"+str(log_id)+\"|\"+message+\"|\"+os+\"|\"+log_type+\"|\"+host+\"|\"+log_machine\n print(log)\n\n f = open(self.path, \"a\")\n f.write(log + \"\\n\")\n f.close()\n time.sleep(0.2)\n\n\nclass NoAlarmState(State):\n def run(self):\n for i in range(self.iterations):\n os_val = np.random.choice(['Windows7', 'Windows10', 'Ubuntu16', 
'MacOS10'])\n addr_val = np.random.choice(['127.0.0.6', '127.0.0.7', '127.0.0.13', '127.0.0.42'])\n timestamp = datetime.now()\n log_id = uuid.uuid4()\n message = \"Unsuccessful login attempt\"\n os = os_val\n log_type = \"Informational\"\n host = log_host\n log_machine = addr_val\n\n log = str(timestamp)+\"|\"+str(log_id)+\"|\"+message+\"|\"+os+\"|\"+log_type+\"|\"+host+\"|\"+log_machine\n print(log)\n\n f = open(self.path, \"a\")\n f.write(log + \"\\n\")\n f.close()\n time.sleep(1.5)\n", "step-ids": [ 4, 7, 8, 9, 10 ] }
[ 4, 7, 8, 9, 10 ]
def formula(a, b):
    # Guard against division by zero; return None so the caller can tell
    # that no quotient was produced.
    if b == 0:
        print("You can not divide by zero")
        return None
    return (a + b) / b


print(formula(4, 4))
print(formula(2, 0))
normal
{ "blob_id": "dffd575b9d5b763abdbce6f88586c183b71086c4", "index": 7701, "step-1": "def formula(a,b):\n if(b == 0):\n print \"You can not divide by zero\"\n else:\n return (a+b)/b \n\n\nprint formula(4,4)\nprint formula(2,0)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import numpy as np from sklearn.decomposition import PCA import pandas as pd from numpy.testing import assert_array_almost_equal import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from sklearn import decomposition from sklearn import datasets def transform(x): if x == 'Kama': return 0 elif x == 'Rosa': return 1 else: return 2 original = pd.read_csv("seeds.csv") original["Class"] = original["Class"].apply(lambda x: transform(x)) X = original.drop("Class", 1) y = original["Class"] fig = plt.figure(1, figsize=(4,3)) plt.clf() ax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134) plt.cla() pca= PCA(n_components = 3) pca.fit(X) X = pca.transform(X) for name, label in [('Kama', 0), ('Rosa', 1), ('Canadian', 2)]: ax.text3D(X[y == label, 0].mean(), X[y == label, 1].mean() + 1.5, X[y == label, 2].mean(), name, horizontalalignment='center', bbox=dict(alpha=.5, edgecolor='w', facecolor='w')) # Reorder the labels to have colors matching the cluster results y = np.choose(y, [1, 2, 0]).astype(np.float) ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral, edgecolor='k') ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) plt.show() # pca = PCA(n_components=3) # pca.fit(df) # U, S, VT = np.linalg.svd(df - df.mean(0)) # #assert_array_almost_equal(VT[:6], pca.components_) # X_train_pca = pca.transform(df) # X_train_pca2 = (df - pca.mean_).dot(pca.components_.T) # #assert_array_almost_equal(X_train_pca, X_train_pca2) # X_projected = pca.inverse_transform(X_train_pca) # X_projected2 = X_train_pca.dot(pca.components_) + pca.mean_ # #assert_array_almost_equal(X_projected, X_projected2) # loss = ((df - X_projected) ** 2).mean() # print(loss) # sse_loss = np.sum((df-X_projected)**2) # print(sse_loss) # print(pca.components_) # print(pca.explained_variance_ratio_) # # loadings # loadings = pca.components_.T * np.sqrt(pca.explained_variance_) # print(loadings) # print(X_projected) # print(len(X_projected)) # print(len(X_projected[0])) # # We center the data and compute the sample covariance matrix. # X_centered = df - np.mean(df, axis=0) # cov_matrix = np.dot(X_centered.T, X_centered) / 569 # eigenvalues = pca.explained_variance_ # for eigenvalue, eigenvector in zip(eigenvalues, pca.components_): # print(np.dot(eigenvector.T, np.dot(cov_matrix, eigenvector))) # print(eigenvalue) #np.savetxt("wdbc_ica.csv", X_projected, delimiter=",") # print(pca) # print(pca.explained_variance_ratio_) # print(pca.singular_values_) # print(len(pca.transform(df))) # print(len(pca.transform(df)[0]))
normal
{ "blob_id": "ef04e808a2a0e6570b28ef06784322e0b2ca1f8f", "index": 4774, "step-1": "<mask token>\n\n\ndef transform(x):\n if x == 'Kama':\n return 0\n elif x == 'Rosa':\n return 1\n else:\n return 2\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef transform(x):\n if x == 'Kama':\n return 0\n elif x == 'Rosa':\n return 1\n else:\n return 2\n\n\n<mask token>\nplt.clf()\n<mask token>\nplt.cla()\n<mask token>\npca.fit(X)\n<mask token>\nfor name, label in [('Kama', 0), ('Rosa', 1), ('Canadian', 2)]:\n ax.text3D(X[y == label, 0].mean(), X[y == label, 1].mean() + 1.5, X[y ==\n label, 2].mean(), name, horizontalalignment='center', bbox=dict(\n alpha=0.5, edgecolor='w', facecolor='w'))\n<mask token>\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral, edgecolor='k')\nax.w_xaxis.set_ticklabels([])\nax.w_yaxis.set_ticklabels([])\nax.w_zaxis.set_ticklabels([])\nplt.show()\n", "step-3": "<mask token>\n\n\ndef transform(x):\n if x == 'Kama':\n return 0\n elif x == 'Rosa':\n return 1\n else:\n return 2\n\n\noriginal = pd.read_csv('seeds.csv')\noriginal['Class'] = original['Class'].apply(lambda x: transform(x))\nX = original.drop('Class', 1)\ny = original['Class']\nfig = plt.figure(1, figsize=(4, 3))\nplt.clf()\nax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\nplt.cla()\npca = PCA(n_components=3)\npca.fit(X)\nX = pca.transform(X)\nfor name, label in [('Kama', 0), ('Rosa', 1), ('Canadian', 2)]:\n ax.text3D(X[y == label, 0].mean(), X[y == label, 1].mean() + 1.5, X[y ==\n label, 2].mean(), name, horizontalalignment='center', bbox=dict(\n alpha=0.5, edgecolor='w', facecolor='w'))\ny = np.choose(y, [1, 2, 0]).astype(np.float)\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral, edgecolor='k')\nax.w_xaxis.set_ticklabels([])\nax.w_yaxis.set_ticklabels([])\nax.w_zaxis.set_ticklabels([])\nplt.show()\n", "step-4": "import numpy as np\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nfrom numpy.testing import assert_array_almost_equal\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import decomposition\nfrom sklearn import datasets\n\n\ndef transform(x):\n if x == 'Kama':\n return 0\n elif x == 'Rosa':\n return 1\n else:\n return 2\n\n\noriginal = pd.read_csv('seeds.csv')\noriginal['Class'] = original['Class'].apply(lambda x: transform(x))\nX = original.drop('Class', 1)\ny = original['Class']\nfig = plt.figure(1, figsize=(4, 3))\nplt.clf()\nax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\nplt.cla()\npca = PCA(n_components=3)\npca.fit(X)\nX = pca.transform(X)\nfor name, label in [('Kama', 0), ('Rosa', 1), ('Canadian', 2)]:\n ax.text3D(X[y == label, 0].mean(), X[y == label, 1].mean() + 1.5, X[y ==\n label, 2].mean(), name, horizontalalignment='center', bbox=dict(\n alpha=0.5, edgecolor='w', facecolor='w'))\ny = np.choose(y, [1, 2, 0]).astype(np.float)\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral, edgecolor='k')\nax.w_xaxis.set_ticklabels([])\nax.w_yaxis.set_ticklabels([])\nax.w_zaxis.set_ticklabels([])\nplt.show()\n", "step-5": "import numpy as np\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nfrom numpy.testing import assert_array_almost_equal\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import decomposition\nfrom sklearn import datasets\n\ndef transform(x):\n\tif x == 'Kama':\n\t\treturn 0\n\telif x == 'Rosa':\n\t\treturn 1\n\telse:\n\t\treturn 2\n\n\noriginal = pd.read_csv(\"seeds.csv\")\noriginal[\"Class\"] = 
original[\"Class\"].apply(lambda x: transform(x))\nX = original.drop(\"Class\", 1)\ny = original[\"Class\"] \nfig = plt.figure(1, figsize=(4,3))\nplt.clf()\nax = Axes3D(fig, rect=[0, 0, 0.95, 1], elev=48, azim=134)\n\nplt.cla()\npca= PCA(n_components = 3)\npca.fit(X)\nX = pca.transform(X)\n\nfor name, label in [('Kama', 0), ('Rosa', 1), ('Canadian', 2)]:\n\tax.text3D(X[y == label, 0].mean(),\n X[y == label, 1].mean() + 1.5,\n X[y == label, 2].mean(), name,\n horizontalalignment='center',\n bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))\n# Reorder the labels to have colors matching the cluster results\ny = np.choose(y, [1, 2, 0]).astype(np.float)\nax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral,\n edgecolor='k')\n\nax.w_xaxis.set_ticklabels([])\nax.w_yaxis.set_ticklabels([])\nax.w_zaxis.set_ticklabels([])\n\nplt.show()\n# pca = PCA(n_components=3)\n# pca.fit(df)\n\n# U, S, VT = np.linalg.svd(df - df.mean(0))\n# #assert_array_almost_equal(VT[:6], pca.components_)\n\n# X_train_pca = pca.transform(df)\n# X_train_pca2 = (df - pca.mean_).dot(pca.components_.T)\n# #assert_array_almost_equal(X_train_pca, X_train_pca2)\n\n# X_projected = pca.inverse_transform(X_train_pca)\n# X_projected2 = X_train_pca.dot(pca.components_) + pca.mean_\n# #assert_array_almost_equal(X_projected, X_projected2)\n\n# loss = ((df - X_projected) ** 2).mean()\n# print(loss)\n# sse_loss = np.sum((df-X_projected)**2)\n# print(sse_loss)\n# print(pca.components_)\n# print(pca.explained_variance_ratio_)\n# # loadings\n# loadings = pca.components_.T * np.sqrt(pca.explained_variance_)\n# print(loadings)\n# print(X_projected)\n# print(len(X_projected))\n# print(len(X_projected[0]))\n\n# # We center the data and compute the sample covariance matrix.\n# X_centered = df - np.mean(df, axis=0)\n# cov_matrix = np.dot(X_centered.T, X_centered) / 569\n# eigenvalues = pca.explained_variance_\n# for eigenvalue, eigenvector in zip(eigenvalues, pca.components_): \n# print(np.dot(eigenvector.T, np.dot(cov_matrix, eigenvector)))\n# print(eigenvalue)\n\n#np.savetxt(\"wdbc_ica.csv\", X_projected, delimiter=\",\")\n\n\n# print(pca)\n# print(pca.explained_variance_ratio_)\n# print(pca.singular_values_)\n# print(len(pca.transform(df)))\n# print(len(pca.transform(df)[0]))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.test import TestCase, Client
from pdf_crawler.models import Document
from rest_framework.reverse import reverse


class TestCase(TestCase):
    client = Client()

    def setUp(self):
        Document.objects.create(name='First').save()

    def test_endpoints(self):
        """
        test for endpoints
        """
        self.assertEqual(self.client.get(reverse(
            'pdf_crawler:document-list')).status_code, 200)
        self.assertEqual(self.client.get(reverse(
            'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)
        self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).
                         status_code, 200)
normal
{ "blob_id": "0d28ab54f08301d9788ca9a5e46d522e043e9507", "index": 4474, "step-1": "<mask token>\n\n\nclass TestCase(TestCase):\n <mask token>\n\n def setUp(self):\n Document.objects.create(name='First').save()\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TestCase(TestCase):\n <mask token>\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n", "step-3": "<mask token>\n\n\nclass TestCase(TestCase):\n client = Client()\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n", "step-4": "from django.test import TestCase, Client\nfrom pdf_crawler.models import Document\nfrom rest_framework.reverse import reverse\n\n\nclass TestCase(TestCase):\n client = Client()\n\n def setUp(self):\n Document.objects.create(name='First').save()\n\n def test_endpoints(self):\n \"\"\"\n test for endpoints\n \"\"\"\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-list')).status_code, 200)\n self.assertEqual(self.client.get(reverse(\n 'pdf_crawler:document-detail', kwargs={'pk': 1})).status_code, 200)\n self.assertEqual(self.client.get(reverse('pdf_crawler:url-list')).\n status_code, 200)\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
# Exercise 28 - Sheet VI (5) - Compute the weighted final grade (theory part 60%,
# practical part 40%) for each of a given number of students.

numalumnos = int(input("Introduce el número total de alumnos:\n"))
print("Usa el punto '.' para los decimales")
for contador in range(1, numalumnos + 1):
    print(f"\nDatos del alumno número {contador} de {numalumnos}:")
    teorica = float(input("- Introduce la nota de la parte teórica: "))
    practica = float(input("- Introduce la nota de la parte practica: "))
    nota = (teorica * 60 / 100) + (practica * 40 / 100)
    print(f"La nota final del alumno número {contador} es {nota:.2f}.\n")
print("Ya se han calculado todas las notas.")
normal
{ "blob_id": "f2056ff46ce6e38c3b6ca553bbdec7f59d60b198", "index": 1417, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(\"Usa el punto '.' para los decimales\")\nfor contador in range(1, numalumnos + 1):\n print(f'\\nDatos del alumno número {contador} de {numalumnos}:')\n teorica = float(input('- Introduce la nota de la parte teórica: '))\n practica = float(input('- Introduce la nota de la parte practica: '))\n nota = teorica * 60 / 100 + practica * 40 / 100\n print(f'La nota final del alumno número {contador} es {nota:.2f}.\\n')\nprint('Ya se han calculado todas las notas.')\n", "step-3": "numalumnos = int(input('Introduce el número total de alumnos:\\n'))\nprint(\"Usa el punto '.' para los decimales\")\nfor contador in range(1, numalumnos + 1):\n print(f'\\nDatos del alumno número {contador} de {numalumnos}:')\n teorica = float(input('- Introduce la nota de la parte teórica: '))\n practica = float(input('- Introduce la nota de la parte practica: '))\n nota = teorica * 60 / 100 + practica * 40 / 100\n print(f'La nota final del alumno número {contador} es {nota:.2f}.\\n')\nprint('Ya se han calculado todas las notas.')\n", "step-4": "# Ejercicio 28 - Hoja VI (5) - Indicar la nota ponderada según el criterio dado\n# (parte teórica 60%, práctica 40%) de cada uno de un número determinado de alumnos\n\nnumalumnos=int(input(\"Introduce el número total de alumnos:\\n\"))\nprint(\"Usa el punto '.' para los decimales\")\nfor contador in range(1,numalumnos+1):\n print(f\"\\nDatos del alumno número {contador} de {numalumnos}:\")\n teorica=float(input(\"- Introduce la nota de la parte teórica: \"))\n practica=float(input(\"- Introduce la nota de la parte practica: \"))\n nota=(teorica*60/100)+(practica*40/100)\n print(f\"La nota final del alumno número {contador} es {nota:.2f}.\\n\")\nprint(\"Ya se han calculado todas las notas.\")\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
version https://git-lfs.github.com/spec/v1
oid sha256:7f0b7267333e6a4a73d3df0ee7f384f7b3cb6ffb14ed2dc8a5894b853bac8957
size 1323
normal
{ "blob_id": "f1972baee8b399c9a52561c8f015f71cb9922bb0", "index": 4875, "step-1": "version https://git-lfs.github.com/spec/v1\noid sha256:7f0b7267333e6a4a73d3df0ee7f384f7b3cb6ffb14ed2dc8a5894b853bac8957\nsize 1323\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental Resolver for getting the latest artifact."""

from typing import Dict, List, Optional

from tfx import types
from tfx.dsl.components.common import resolver
from tfx.types import standard_artifacts
from tfx.utils import doc_controls

import ml_metadata as mlmd

try:
  from tfx.components.evaluator import constants as eval_consts  # pylint: disable=g-import-not-at-top
  _CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY
  _BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY
except ImportError:
  # ml-pipelines-sdk package doesn't have tfx.components.
  _CURRENT_MODEL_ID = 'current_model_id'
  _BLESSED = 'blessed'


class LatestBlessedModelStrategy(resolver.ResolverStrategy):
  """LatestBlessedModelStrategy resolves the latest blessed Model artifact.

  Note that this ResolverStrategy is experimental and is subject to change in
  terms of both interface and implementation.

  Don't construct LatestBlessedModelStrategy directly, example usage:
  ```
  model_resolver = Resolver(
      strategy_class=LatestBlessedModelStrategy,
      model=Channel(type=Model),
      model_blessing=Channel(type=ModelBlessing),
  ).with_id('latest_blessed_model_resolver')
  model_resolver.outputs['model']
  ```
  """

  def _resolve(self, input_dict: Dict[str, List[types.Artifact]],
               model_channel_key: str, model_blessing_channel_key: str):
    all_models = input_dict[model_channel_key]
    all_models.sort(key=lambda a: a.id, reverse=True)
    all_model_blessings = input_dict[model_blessing_channel_key]

    # Makes a dict of {model_id : ModelBlessing artifact} for blessed models.
    all_blessed_model_ids = {
        a.get_int_custom_property(_CURRENT_MODEL_ID): a
        for a in all_model_blessings
        if a.get_int_custom_property(_BLESSED) == 1}

    result = {model_channel_key: [], model_blessing_channel_key: []}
    # Iterates all models, if blessed, set as result. As the model list was
    # sorted, it is guaranteed to get the latest blessed model.
    for model in all_models:
      if model.id in all_blessed_model_ids:
        result[model_channel_key] = [model]
        model_blessing = all_blessed_model_ids[model.id]
        result[model_blessing_channel_key] = [model_blessing]
        break

    return result

  @doc_controls.do_not_generate_docs
  def resolve_artifacts(
      self, store: mlmd.MetadataStore,
      input_dict: Dict[str, List[types.Artifact]]
  ) -> Optional[Dict[str, List[types.Artifact]]]:
    """Resolves artifacts from channels by querying MLMD.

    Args:
      store: An MLMD MetadataStore object.
      input_dict: The input_dict to resolve from.

    Returns:
      The latest blessed Model and its corresponding ModelBlessing, respectively
      in the same input channel they were contained to.

    Raises:
      RuntimeError: if input_dict contains unsupported artifact types.
    """
    model_channel_key = None
    model_blessing_channel_key = None
    assert len(input_dict) == 2, 'Expecting 2 input Channels'
    for k, artifact_list in input_dict.items():
      if not artifact_list:
        # If model or model blessing channel has no artifacts, the min_count
        # can not be met, short cut to return empty dict here.
        return {key: [] for key in input_dict}
      artifact = artifact_list[0]
      if issubclass(type(artifact), standard_artifacts.Model):
        model_channel_key = k
      elif issubclass(type(artifact), standard_artifacts.ModelBlessing):
        model_blessing_channel_key = k
      else:
        raise RuntimeError('Only expecting Model or ModelBlessing, got %s' %
                           artifact.TYPE_NAME)
    assert model_channel_key is not None, 'Expecting Model as input'
    assert model_blessing_channel_key is not None, ('Expecting ModelBlessing as'
                                                    ' input')

    result = self._resolve(input_dict, model_channel_key,
                           model_blessing_channel_key)
    return result
normal
{ "blob_id": "30df17d636c33d2824aad7d7ef6aae7db83615ec", "index": 8058, "step-1": "<mask token>\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n <mask token>\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n", "step-2": "<mask token>\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n 
model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n", "step-3": "<mask token>\ntry:\n from tfx.components.evaluator import constants as eval_consts\n _CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY\n _BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY\nexcept ImportError:\n _CURRENT_MODEL_ID = 'current_model_id'\n _BLESSED = 'blessed'\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its 
corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n", "step-4": "<mask token>\nfrom typing import Dict, List, Optional\nfrom tfx import types\nfrom tfx.dsl.components.common import resolver\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import doc_controls\nimport ml_metadata as mlmd\ntry:\n from tfx.components.evaluator import constants as eval_consts\n _CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY\n _BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY\nexcept ImportError:\n _CURRENT_MODEL_ID = 'current_model_id'\n _BLESSED = 'blessed'\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n all_blessed_model_ids = {a.get_int_custom_property(\n _CURRENT_MODEL_ID): a for a in all_model_blessings if a.\n get_int_custom_property(_BLESSED) == 1}\n result = {model_channel_key: [], model_blessing_channel_key: []}\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(self, store: mlmd.MetadataStore, input_dict: Dict\n [str, List[types.Artifact]]) ->Optional[Dict[str, List[types.Artifact]]\n ]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input 
Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError(\n 'Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, 'Expecting ModelBlessing as input'\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n", "step-5": "# Copyright 2021 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Experimental Resolver for getting the latest artifact.\"\"\"\n\nfrom typing import Dict, List, Optional\n\nfrom tfx import types\nfrom tfx.dsl.components.common import resolver\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import doc_controls\n\nimport ml_metadata as mlmd\n\ntry:\n from tfx.components.evaluator import constants as eval_consts # pylint: disable=g-import-not-at-top\n _CURRENT_MODEL_ID = eval_consts.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY\n _BLESSED = eval_consts.ARTIFACT_PROPERTY_BLESSED_KEY\nexcept ImportError:\n # ml-pipelines-sdk package doesn't have tfx.components.\n _CURRENT_MODEL_ID = 'current_model_id'\n _BLESSED = 'blessed'\n\n\nclass LatestBlessedModelStrategy(resolver.ResolverStrategy):\n \"\"\"LatestBlessedModelStrategy resolves the latest blessed Model artifact.\n\n Note that this ResolverStrategy is experimental and is subject to change in\n terms of both interface and implementation.\n\n Don't construct LatestBlessedModelStrategy directly, example usage:\n ```\n model_resolver = Resolver(\n strategy_class=LatestBlessedModelStrategy,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing),\n ).with_id('latest_blessed_model_resolver')\n model_resolver.outputs['model']\n ```\n \"\"\"\n\n def _resolve(self, input_dict: Dict[str, List[types.Artifact]],\n model_channel_key: str, model_blessing_channel_key: str):\n all_models = input_dict[model_channel_key]\n all_models.sort(key=lambda a: a.id, reverse=True)\n all_model_blessings = input_dict[model_blessing_channel_key]\n\n # Makes a dict of {model_id : ModelBlessing artifact} for blessed models.\n all_blessed_model_ids = {\n a.get_int_custom_property(_CURRENT_MODEL_ID): a\n for a in all_model_blessings\n if a.get_int_custom_property(_BLESSED) == 1}\n\n result = {model_channel_key: [], model_blessing_channel_key: []}\n # Iterates all models, if blessed, set as result. 
As the model list was\n # sorted, it is guaranteed to get the latest blessed model.\n for model in all_models:\n if model.id in all_blessed_model_ids:\n result[model_channel_key] = [model]\n model_blessing = all_blessed_model_ids[model.id]\n result[model_blessing_channel_key] = [model_blessing]\n break\n\n return result\n\n @doc_controls.do_not_generate_docs\n def resolve_artifacts(\n self, store: mlmd.MetadataStore,\n input_dict: Dict[str, List[types.Artifact]]\n ) -> Optional[Dict[str, List[types.Artifact]]]:\n \"\"\"Resolves artifacts from channels by querying MLMD.\n\n Args:\n store: An MLMD MetadataStore object.\n input_dict: The input_dict to resolve from.\n\n Returns:\n The latest blessed Model and its corresponding ModelBlessing, respectively\n in the same input channel they were contained to.\n\n Raises:\n RuntimeError: if input_dict contains unsupported artifact types.\n \"\"\"\n model_channel_key = None\n model_blessing_channel_key = None\n assert len(input_dict) == 2, 'Expecting 2 input Channels'\n for k, artifact_list in input_dict.items():\n if not artifact_list:\n # If model or model blessing channel has no artifacts, the min_count\n # can not be met, short cut to return empty dict here.\n return {key: [] for key in input_dict}\n artifact = artifact_list[0]\n if issubclass(type(artifact), standard_artifacts.Model):\n model_channel_key = k\n elif issubclass(type(artifact), standard_artifacts.ModelBlessing):\n model_blessing_channel_key = k\n else:\n raise RuntimeError('Only expecting Model or ModelBlessing, got %s' %\n artifact.TYPE_NAME)\n assert model_channel_key is not None, 'Expecting Model as input'\n assert model_blessing_channel_key is not None, ('Expecting ModelBlessing as'\n ' input')\n\n result = self._resolve(input_dict, model_channel_key,\n model_blessing_channel_key)\n return result\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sea
import sklearn
import glob
import pydub
from pydub import AudioSegment
import time
import librosa
import noisereduce as nr
from scipy.io import wavfile
import IPython
import sounddevice as sd
from pysndfx import AudioEffectsChain
import python_speech_features
import sox
import math


# y, sr = librosa.load(r"C:\Users\pranj\OneDrive\Desktop\Project\72843_lonemonk_approx-800-laughter-only-1.wav")
my, sr = librosa.load(r"C:\Users\pranj\Downloads\IEMOCAP_full_release_withoutVideos\IEMOCAP_full_release\Session1\sentences\wav\Ses01F_impro01\Ses01F_impro01_F000.wav")
reduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True, prop_decrease=0.8)
print(IPython.display.Audio(data=my, rate=sr))
sd.play(my, sr)
status = sd.wait()
normal
{ "blob_id": "14bf4befdce4270b4514b4e643964182f9c49ff4", "index": 8434, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\n<mask token>\n", "step-3": "<mask token>\nmy, sr = librosa.load(\n 'C:\\\\Users\\\\pranj\\\\Downloads\\\\IEMOCAP_full_release_withoutVideos\\\\IEMOCAP_full_release\\\\Session1\\\\sentences\\\\wav\\\\Ses01F_impro01\\\\Ses01F_impro01_F000.wav'\n )\nreduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,\n prop_decrease=0.8)\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\nstatus = sd.wait()\n", "step-4": "import sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sea\nimport sklearn\nimport glob\nimport pydub\nfrom pydub import AudioSegment\nimport time\nimport librosa\nimport noisereduce as nr\nfrom scipy.io import wavfile\nimport IPython\nimport sounddevice as sd\nfrom pysndfx import AudioEffectsChain\nimport python_speech_features\nimport sox\nimport math\nmy, sr = librosa.load(\n 'C:\\\\Users\\\\pranj\\\\Downloads\\\\IEMOCAP_full_release_withoutVideos\\\\IEMOCAP_full_release\\\\Session1\\\\sentences\\\\wav\\\\Ses01F_impro01\\\\Ses01F_impro01_F000.wav'\n )\nreduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,\n prop_decrease=0.8)\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\nstatus = sd.wait()\n", "step-5": "import sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sea\nimport sklearn \nimport glob\nimport pydub\nfrom pydub import AudioSegment\nimport time\nimport librosa\nimport noisereduce as nr\nfrom scipy.io import wavfile\nimport IPython\nimport sounddevice as sd\nfrom pysndfx import AudioEffectsChain\nimport python_speech_features\nimport sox\nimport math\n\n\n\n#y,sr=librosa.load(r\"C:\\Users\\pranj\\OneDrive\\Desktop\\Project\\72843_lonemonk_approx-800-laughter-only-1.wav\")\nmy,sr=librosa.load(r\"C:\\Users\\pranj\\Downloads\\IEMOCAP_full_release_withoutVideos\\IEMOCAP_full_release\\Session1\\sentences\\wav\\Ses01F_impro01\\Ses01F_impro01_F000.wav\")\nreduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,prop_decrease=0.8)\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\nstatus = sd.wait()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import logging


def log_func(handler):
    if handler.get_status() < 400:
        log_method = logging.info
    elif handler.get_status() < 500:
        log_method = logging.warning
    else:
        log_method = logging.error
    request_time = 1000.0 * handler.request.request_time()
    log_method("%d %s %s (%s) %s %.2fms",
               handler.get_status(), handler.request.method,
               handler.request.uri, handler.request.remote_ip,
               handler.request.arguments,
               request_time)


configs = dict(
    LOG_LEVEL=logging.INFO,          # log level
    debug=True,                      # Debug
    log_function=log_func,           # request-log handler
    template_path='views',           # HTML templates
    static_path='statics',           # static files (css, js, img)
    static_url_prefix='/statics/',   # URL prefix for static files
    cookie_secret='suoning',         # secret string used to sign cookies
    xsrf_cookies=True,               # protect against cross-site request forgery
)
normal
{ "blob_id": "e403be68894ba283d71a0b71bb0bfd0adfab8c41", "index": 8684, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef log_func(handler):\n if handler.get_status() < 400:\n log_method = logging.info\n elif handler.get_status() < 500:\n log_method = logging.warning\n else:\n log_method = logging.error\n request_time = 1000.0 * handler.request.request_time()\n log_method('%d %s %s (%s) %s %s %.2fms', handler.get_status(), handler.\n request.method, handler.request.uri, handler.request.remote_ip,\n handler.request.arguments, request_time)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef log_func(handler):\n if handler.get_status() < 400:\n log_method = logging.info\n elif handler.get_status() < 500:\n log_method = logging.warning\n else:\n log_method = logging.error\n request_time = 1000.0 * handler.request.request_time()\n log_method('%d %s %s (%s) %s %s %.2fms', handler.get_status(), handler.\n request.method, handler.request.uri, handler.request.remote_ip,\n handler.request.arguments, request_time)\n\n\nconfigs = dict(LOG_LEVEL=logging.INFO, debug=True, log_function=log_func,\n template_path='views', static_path='statics', static_url_prefix=\n '/statics/', cookie_secret='suoning', xsrf_cookies=True)\n", "step-4": "import logging\n\n\ndef log_func(handler):\n if handler.get_status() < 400:\n log_method = logging.info\n elif handler.get_status() < 500:\n log_method = logging.warning\n else:\n log_method = logging.error\n request_time = 1000.0 * handler.request.request_time()\n log_method('%d %s %s (%s) %s %s %.2fms', handler.get_status(), handler.\n request.method, handler.request.uri, handler.request.remote_ip,\n handler.request.arguments, request_time)\n\n\nconfigs = dict(LOG_LEVEL=logging.INFO, debug=True, log_function=log_func,\n template_path='views', static_path='statics', static_url_prefix=\n '/statics/', cookie_secret='suoning', xsrf_cookies=True)\n", "step-5": "import logging\n\n\ndef log_func(handler):\n if handler.get_status() < 400:\n log_method = logging.info\n elif handler.get_status() < 500:\n log_method = logging.warning\n else:\n log_method = logging.error\n request_time = 1000.0 * handler.request.request_time()\n log_method(\"%d %s %s (%s) %s %s %.2fms\",\n handler.get_status(), handler.request.method,\n handler.request.uri, handler.request.remote_ip,\n handler.request.arguments,\n request_time)\n\n\nconfigs = dict(\n LOG_LEVEL=logging.INFO, # 日志等级\n debug=True, # Debug\n log_function=log_func, # 日志处理方法\n template_path='views', # html文件\n static_path='statics', # 静态文件(css,js,img)\n static_url_prefix='/statics/', # 静态文件前缀\n cookie_secret='suoning', # cookie自定义字符串加盐\n xsrf_cookies=True, # 防止跨站伪造\n)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Leap-year check; the prompt and output labels are Japanese
# (西暦 = calendar year, 閏年 = leap year, 平年 = common year).
year = int(input('西暦>'))
if year % 4 == 0 and year % 100 != 0:
    print('閏年')
elif year % 400 == 0:
    print('閏年')
else:
    print('平年')
normal
{ "blob_id": "b381d1110e6a7570cd872d689a43aba2d2580a23", "index": 8449, "step-1": "<mask token>\n", "step-2": "<mask token>\nif year % 4 == 0 and year % 100 != 0:\n print('閏年')\n pass\nelif year % 400 == 0:\n print('閏年')\n pass\nelse:\n print('平年')\n pass\n", "step-3": "year = int(input('西暦>'))\nif year % 4 == 0 and year % 100 != 0:\n print('閏年')\n pass\nelif year % 400 == 0:\n print('閏年')\n pass\nelse:\n print('平年')\n pass\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import logging

from django.contrib.auth import get_user_model
from django.db import models
from rest_framework import serializers
from rest_framework.test import APITestCase

from ..autodocs.docs import ApiDocumentation
from .utils import Deferred

log = logging.getLogger(__name__)


def get_serializer(endpoint, method_name, dict_key='in'):
    """
    Return the serializer class, if one exists for the given endpoint and method.

    :param `ApiEndpoint` endpoint: The endpoint.
    :param str method_name: The HTTP method.
    :param str dict_key: Key of the serializer dict, either 'in' or 'out'.

    :return: The serializer class, or None.

    """
    methods = [method_name]

    # When testing the PATCH method and no serializer is defined for it, fall back to the PUT serializer.
    if method_name == 'PATCH':
        methods.append('PUT')

    for method in methods:
        if method in endpoint.serializer_classes and \
                isinstance(endpoint.serializer_classes[method], dict) and \
                dict_key in endpoint.serializer_classes[method]:
            return endpoint.serializer_classes[method][dict_key]


def resolve_deferred(value):
    """
    Replace a `Deferred` object with the pk of an instance of `Deferred.model`.

    :param any value: Any object.

    """
    if isinstance(value, Deferred):
        obj = model_instance(value.model, value.force_create)
        return obj.pk
    elif isinstance(value, dict):
        return {resolve_deferred(k): resolve_deferred(v) for k, v in value.items()}
    elif isinstance(value, list):
        return [resolve_deferred(v) for v in value]
    return value


def model_instance(model, force_create=False):
    """
    Create and return a model instance.

    :param any model: The model.
    :param bool force_create: Create a new object instead of reusing an existing one.

    :return: A model instance.
    :rtype: models.Model.

    """
    if not force_create and model.objects.all().count() > 0:
        return model.objects.first()

    data = {}
    for field in model._meta.get_fields():
        if not field.auto_created and not field.blank:
            if hasattr(field, 'choices') and len(field.choices) > 0:
                data[field.name] = field.choices[0][0]
            elif isinstance(field, models.IntegerField):
                data[field.name] = 1
            elif isinstance(field, models.ForeignKey):
                data[field.name] = model_instance(field.related_model)
            elif isinstance(field, models.CharField):
                data[field.name] = 'test'

    return model.objects.create(**data)


class AutoTestCase(APITestCase):
    """
    Test case that exercises the REST endpoints automatically.

    """

    @classmethod
    def setUpClass(cls):
        """
        Create the user shared by all tests, referenced through `settings.AUTH_USER_PK`.

        """
        super(AutoTestCase, cls).setUpClass()
        model_instance(get_user_model())

    def setUp(self):
        """
        Prepare the test request: read the entry for this test from the REQUESTS_DATA dict
        and create / fetch the objects whose keys are used in the URL.

        """
        self.endpoint, self.method, self.serializer, self.request_type = REQUESTS_DATA.get(self._testMethodName)

        path = self.endpoint.path
        if '<pk>' in path:
            obj = model_instance(self.endpoint.callback.cls.queryset.model)
            path = path.replace('<pk>', str(obj.pk))
        self.path = path

        if hasattr(self.endpoint.callback.cls, 'test_setup'):
            getattr(self.endpoint.callback.cls, 'test_setup')(self)

    def base_test_method(self):
        """
        Method that exercises the endpoint received from the iterator.

        """
        request_method = getattr(self.client, self.method.lower())

        if self.serializer:
            if self.request_type == 'all':
                # Request with the full input payload.
                data = self.prepare_request_data(self.serializer)
                response = self.send_request(request_method, self.path, data, 'json')
                self.check_response_is_valid(response)
            elif self.request_type == 'only_required':
                # Request with only the required fields.
                data = self.prepare_request_data(self.serializer, only_required=True)
                response = self.send_request(request_method, self.path, data, 'json')
                self.check_response_is_valid(response)
            elif self.request_type == 'without_required':
                # Request with one of the required fields missing.
                data = self.prepare_request_data(self.serializer, only_required=True)
                data.popitem()
                response = self.send_request(request_method, self.path, data, 'json')
                self.assertTrue(400 <= response.status_code < 500)
        else:
            # Request without an input payload.
            response = self.send_request(request_method, self.path)
            self.check_response_is_valid(response)

    def prepare_request_data(self, field, only_required=False):
        """
        Prepare the data for a request.

        :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: A field or serializer object.
        :param bool only_required: Whether to use only the required fields.

        :return: Data to be sent by the client.
        :rtype: list, dict.

        """
        # This is the serializer class itself rather than an instance.
        if isinstance(field, serializers.SerializerMetaclass):
            return self.prepare_request_data(field())

        # Or a test value set via `test_helper_factory`.
        elif hasattr(field, 'test_helper_value'):
            return resolve_deferred(field.test_helper_value)

        # Or a list.
        elif isinstance(field, serializers.ListSerializer):
            return [self.prepare_request_data(field.child)]

        # Or a serializer instance.
        elif isinstance(field, serializers.BaseSerializer):
            return {k: self.prepare_request_data(v)
                    for k, v in field.get_fields().items()
                    if (not only_required) or (only_required and v.required)}

        # Or a plain field.
        elif isinstance(field, serializers.ChoiceField):
            for val, verbose in field.choices.items():
                return val
        elif isinstance(field, serializers.PrimaryKeyRelatedField):
            return model_instance(field.queryset.model).pk
        elif isinstance(field, serializers.CharField):
            return 'test'
        elif isinstance(field, serializers.IntegerField):
            return 1

    def send_request(self, request_method, path, data=None, format_type=None):
        """
        Send a request.

        :param method request_method: The client method.
        :param str path: URL.
        :param dict data: Request data.
        :param str format_type: Data format.

        :return: The response.
        :rtype: `rest_framework.response.Response`.

        """
        kwargs = dict(data=data, format=format_type)

        if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):
            kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request')(self, **kwargs)

        self.data = data

        print_strings = ['Sending {} to {}'.format(request_method.__name__, path)]
        if data is not None:
            print_strings.append('with data')
        log.debug(' '.join(print_strings + ['\n']))

        return request_method(path, **kwargs)

    def check_response_is_valid(self, response):
        """
        Check that the response is successful and well-formed.

        :param `rest_framework.response.Response` response: The response.

        """
        self.assertTrue(200 <= response.status_code < 400)

        response_serializer = get_serializer(self.endpoint, self.method, 'out')
        if response_serializer:
            self.check_response_data(response.data, response_serializer)

    def check_response_data(self, data, field):
        """
        Check the data in the response.

        :param any data: The `Response.data` dict or one of its values.
        :param any field: A serializer or field to compare the response data against.

        """
        # @TODO: Validation against the serializer's data is not possible at the moment
        # because something happens to the QuerySet that makes serializer.data raise a RuntimeError.
        '''
        if method_name == 'POST' and method_name in self.endpoint.serializer_classes and \
                'out' in self.endpoint.serializer_classes[method_name]:
            serializer = self.endpoint.serializer_classes[method_name]['out'](
                self.endpoint.callback.cls.queryset, many=True)
            self.assertEqual(response.data, serializer.data)
        '''
        # This is the serializer class itself rather than an instance.
        if isinstance(field, serializers.SerializerMetaclass):
            return self.check_response_data(data, field())

        '''
        if 'results' in data and 'count' in data:
            for item in data['results']:
                self.check_response_data(item, out_fields)

        else:
            for field_name, value in data.items():
                try:
                    field_data = fields[field_name]
                except:
                    import pdb; pdb.set_trace()
                # Check that the field is among those expected in the response
                self.assertTrue(field_name in available_fields)
                available_fields.remove(field_name)

                if field_name in required_fields:
                    required_fields.remove(field_name)

                if field_data['sub_fields']:
                    if hasattr(field_data['field_instance'], 'test_helper_as_dict'):
                        for key, item in data[field_name].items():
                            self.check_response_data(item, field_data['sub_fields'])
                    else:
                        self.check_response_data(data[field_name], field_data['sub_fields'])

                else:
                    field_instance = field_data['field_instance']

                    # Check the value if the field is required or a value is present in the response
                    if field_data['required'] or value is not None:
                        # Check the field type
                        self.assertEquals(type(field_instance.to_representation(value)), type(value))

                        # Check that the value is valid (otherwise an exception is raised)
                        # self.assertRaises(ValidationError, field_instance.to_internal_value(value))
                        field_instance.to_internal_value(value)

        # Make sure every required field is present in the response
        self.assertEqual(len(required_fields), 0)
        '''


ENDPOINTS = ApiDocumentation().get_endpoints()
ENDPOINTS = [ep for ep in ENDPOINTS]

# Build the list of requests.
REQUESTS_LIST = []
for endpoint in ENDPOINTS:
    for method in endpoint.allowed_methods:
        serializer = get_serializer(endpoint, method)

        if serializer:
            # @TODO: Finish support for testing requests with missing required data (without_required).
            # for request_type in ('all', 'only_required', 'without_required'):
            for request_type in ('all', 'only_required'):
                REQUESTS_LIST.append((endpoint, method, serializer, request_type))
        else:
            REQUESTS_LIST.append((endpoint, method, serializer, None))

REQUESTS_DATA = {}

# Add a test method for each collected request.
for endpoint, method, serializer, request_type in REQUESTS_LIST:
    method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method, request_type)
    REQUESTS_DATA[method_name] = (endpoint, method, serializer, request_type)
    setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)
normal
{ "blob_id": "04822e735c9c27f0e0fcc9727bcc38d2da84dee6", "index": 7831, "step-1": "<mask token>\n\n\nclass AutoTestCase(APITestCase):\n <mask token>\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n <mask token>\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс 
сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\n<mask token>\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) 
for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем 
чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = 
self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, 
serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\n<mask token>\n", "step-4": "<mask token>\nlog = logging.getLogger(__name__)\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n if method_name == 'PATCH':\n methods.append('PUT')\n for method in methods:\n if method in endpoint.serializer_classes and isinstance(endpoint.\n serializer_classes[method], dict\n ) and dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k, v in value.\n items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n 
Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = (\n REQUESTS_DATA.get(self._testMethodName))\n path = self.endpoint.path\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n self.path = path\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n if self.serializer:\n if self.request_type == 'all':\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'only_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.check_response_is_valid(response)\n elif self.request_type == 'without_required':\n data = self.prepare_request_data(self.serializer,\n only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path,\n data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n else:\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k, v in field.\n get_fields().items() if not only_required or only_required and\n v.required}\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n elif isinstance(field, serializers.CharField):\n return 'test'\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 
'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request'\n )(self, **kwargs)\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__,\n path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n \"\"\"\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n \"\"\"\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n \"\"\"\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n \"\"\"\n\n\nENDPOINTS = ApiDocumentation().get_endpoints()\nENDPOINTS = [ep for ep in ENDPOINTS]\nREQUESTS_LIST = []\nfor endpoint in ENDPOINTS:\n for method in endpoint.allowed_methods:\n serializer = get_serializer(endpoint, method)\n if serializer:\n for request_type in ('all', 'only_required'):\n REQUESTS_LIST.append((endpoint, method, serializer,\n request_type))\n else:\n REQUESTS_LIST.append((endpoint, method, serializer, None))\nREQUESTS_DATA = {}\nfor endpoint, method, serializer, request_type in REQUESTS_LIST:\n method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method,\n request_type)\n REQUESTS_DATA[method_name] = endpoint, method, serializer, request_type\n setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)\n", "step-5": "import 
logging\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\n\nfrom rest_framework import serializers\nfrom rest_framework.test import APITestCase\n\nfrom ..autodocs.docs import ApiDocumentation\n\nfrom .utils import Deferred\n\nlog = logging.getLogger(__name__)\n\n\ndef get_serializer(endpoint, method_name, dict_key='in'):\n \"\"\"\n Возвращает класс сериалайзера, если тот есть для данного поинта и метода.\n\n :param `ApiEndpoint` endpoint: Поинт.\n :param str method_name: Метод.\n :param str dict_key: Ключ словаря с сериалайзерами, либо 'in' либо 'out'.\n\n :return: Класс сериалайзера либо None.\n\n \"\"\"\n methods = [method_name]\n # Если тестируем PATCH метод и при этом для него нет сериалайзера, используем сериалайзер от PUT.\n if method_name == 'PATCH':\n methods.append('PUT')\n\n for method in methods:\n if method in endpoint.serializer_classes and \\\n isinstance(endpoint.serializer_classes[method], dict) and \\\n dict_key in endpoint.serializer_classes[method]:\n return endpoint.serializer_classes[method][dict_key]\n\n\ndef resolve_deferred(value):\n \"\"\"\n Заменяет `Deferred` объект на pk экземпляра модели `Deferred.model`.\n\n :param any value: Любой объект.\n\n \"\"\"\n if isinstance(value, Deferred):\n obj = model_instance(value.model, value.force_create)\n return obj.pk\n elif isinstance(value, dict):\n return {resolve_deferred(k): resolve_deferred(v) for k,v in value.items()}\n elif isinstance(value, list):\n return [resolve_deferred(v) for v in value]\n return value\n\n\ndef model_instance(model, force_create=False):\n \"\"\"\n Создание и получение экземпляра модели.\n\n :param any model: Модель.\n :param bool force_create: Не получать имеющийся объект, а создавать новый.\n\n :return: Экзмепляр модели.\n :rtype: models.Model.\n\n \"\"\"\n if not force_create and model.objects.all().count() > 0:\n return model.objects.first()\n\n data = {}\n for field in model._meta.get_fields():\n if not field.auto_created and not field.blank:\n if hasattr(field, 'choices') and len(field.choices) > 0:\n data[field.name] = field.choices[0][0]\n\n elif isinstance(field, models.IntegerField):\n data[field.name] = 1\n\n elif isinstance(field, models.ForeignKey):\n data[field.name] = model_instance(field.related_model)\n\n elif isinstance(field, models.CharField):\n data[field.name] = 'test'\n return model.objects.create(**data)\n\n\nclass AutoTestCase(APITestCase):\n \"\"\"\n Класс для автоматического тестирования REST ручек.\n\n \"\"\"\n @classmethod\n def setUpClass(cls):\n \"\"\"\n Создание пользователя для всех тестов, который цепляется через `settings.AUTH_USER_PK`\n\n \"\"\"\n super(AutoTestCase, cls).setUpClass()\n model_instance(get_user_model())\n\n def setUp(self):\n \"\"\"\n Подготовка к тестовому запросу, получение данных из словаря REQUESTS_DATA\n и создание / получение необходимых объектов, ключи которых используются в URL.\n\n \"\"\"\n self.endpoint, self.method, self.serializer, self.request_type = REQUESTS_DATA.get(self._testMethodName)\n\n path = self.endpoint.path\n\n if '<pk>' in path:\n obj = model_instance(self.endpoint.callback.cls.queryset.model)\n path = path.replace('<pk>', str(obj.pk))\n\n self.path = path\n\n if hasattr(self.endpoint.callback.cls, 'test_setup'):\n getattr(self.endpoint.callback.cls, 'test_setup')(self)\n\n def base_test_method(self):\n \"\"\"\n Метод, который проверяет полученный от итератора endpoint.\n\n \"\"\"\n request_method = getattr(self.client, self.method.lower())\n\n if self.serializer:\n if self.request_type 
== 'all':\n # Запрос со всеми данными на входе.\n data = self.prepare_request_data(self.serializer)\n response = self.send_request(request_method, self.path, data, 'json')\n self.check_response_is_valid(response)\n\n elif self.request_type == 'only_required':\n # Запрос только с обязательными данными.\n data = self.prepare_request_data(self.serializer, only_required=True)\n response = self.send_request(request_method, self.path, data, 'json')\n self.check_response_is_valid(response)\n\n elif self.request_type == 'without_required':\n # Запрос не со всеми обязательными данными.\n data = self.prepare_request_data(self.serializer, only_required=True)\n data.popitem()\n response = self.send_request(request_method, self.path, data, 'json')\n self.assertTrue(400 <= response.status_code < 500)\n\n else:\n # Запрос без данных на входе.\n response = self.send_request(request_method, self.path)\n self.check_response_is_valid(response)\n\n def prepare_request_data(self, field, only_required=False):\n \"\"\"\n Подготавливает данные для запроса.\n\n :param rest_framework.fields.Field, rest_framework.serializers.Serializer field: Объект филда или сериалазейра.\n :param bool only_required: Использовать ли только обязательные поля.\n\n :return: Данные для отправки клиентом.\n :rtype: list, dict.\n\n \"\"\"\n # Если это класс сериалайзера, а не его экземпляр.\n if isinstance(field, serializers.SerializerMetaclass):\n return self.prepare_request_data(field())\n\n # Либо имеется тестовое значение установленное через `test_helper_factory`.\n elif hasattr(field, 'test_helper_value'):\n return resolve_deferred(field.test_helper_value)\n\n # Либо это список.\n elif isinstance(field, serializers.ListSerializer):\n return [self.prepare_request_data(field.child)]\n\n # Либо это экземпляр сериалайзера.\n elif isinstance(field, serializers.BaseSerializer):\n return {k: self.prepare_request_data(v) for k,v in field.get_fields().items() \\\n if (not only_required) or (only_required and v.required)}\n\n # Либо это поле.\n elif isinstance(field, serializers.ChoiceField):\n for val, verbose in field.choices.items():\n return val\n\n elif isinstance(field, serializers.PrimaryKeyRelatedField):\n return model_instance(field.queryset.model).pk\n\n elif isinstance(field, serializers.CharField):\n return 'test'\n\n elif isinstance(field, serializers.IntegerField):\n return 1\n\n def send_request(self, request_method, path, data=None, format_type=None):\n \"\"\"\n Отправляет запрос.\n\n :param method request_method: Метод клиента.\n :param str path: URL.\n :param dict data: Данные для запроса.\n :param str format_type: Формат данных.\n\n :return: Ответ.\n :rtype: `rest_framework.response.Response`.\n\n \"\"\"\n kwargs = dict(data=data, format=format_type)\n if hasattr(self.endpoint.callback.cls, 'test_prepare_request'):\n kwargs = getattr(self.endpoint.callback.cls, 'test_prepare_request')(self, **kwargs)\n\n self.data = data\n print_strings = ['Отправка {} на {}'.format(request_method.__name__, path)]\n if data is not None:\n print_strings.append('с данными')\n log.debug(' '.join(print_strings + ['\\n']))\n return request_method(path, **kwargs)\n\n def check_response_is_valid(self, response):\n \"\"\"\n Проверяет ответ на успешность и корректность.\n\n :param `rest_framework.response.Response` response: Ответ.\n\n \"\"\"\n self.assertTrue(200 <= response.status_code < 400)\n\n response_serializer = get_serializer(self.endpoint, self.method, 'out')\n if response_serializer:\n self.check_response_data(response.data, 
response_serializer)\n\n def check_response_data(self, data, field):\n \"\"\"\n Проверяем данные в ответе.\n\n :param any data: Словарь `Response.data` либо одно из его значений.\n :param any field: Сериалайзер или поле для сравнения данных в ответе.\n\n \"\"\"\n # @TODO: Проверка с помощью данных сериалайзера на данный момент не возможна\n # т.к. что-то происходит с QuerySet'ом из-за чего serializer.data вызывает RuntimeError.\n '''\n if method_name == 'POST' and method_name in self.endpoint.serializer_classes and \\\n 'out' in self.endpoint.serializer_classes[method_name]:\n serializer = self.endpoint.serializer_classes[method_name]['out'](\n self.endpoint.callback.cls.queryset, many=True)\n self.assertEqual(response.data, serializer.data)\n '''\n # Если это класс сериалайзера, а не его экземпляр.\n if isinstance(field, serializers.SerializerMetaclass):\n return self.check_response_data(data, field())\n\n '''\n if 'results' in data and 'count' in data:\n for item in data['results']:\n self.check_response_data(item, out_fields)\n\n else:\n for field_name, value in data.items():\n try:\n field_data = fields[field_name]\n except:\n import pdb; pdb.set_trace()\n # Проверка наличия филда среди ожидаемых в ответе\n self.assertTrue(field_name in available_fields)\n available_fields.remove(field_name)\n\n if field_name in required_fields:\n required_fields.remove(field_name)\n\n if field_data['sub_fields']:\n if hasattr(field_data['field_instance'], 'test_helper_as_dict'):\n for key, item in data[field_name].items():\n self.check_response_data(item, field_data['sub_fields'])\n else:\n self.check_response_data(data[field_name], field_data['sub_fields'])\n\n else:\n field_instance = field_data['field_instance']\n\n # Проверка значения если филд обязателен или имеется значение в ответе\n if field_data['required'] or value is not None:\n # Проверка типа филда\n self.assertEquals(type(field_instance.to_representation(value)), type(value))\n\n # Проверка коррекности значения (иначе возникнет исключение)\n # self.assertRaises(ValidationError, field_instance.to_internal_value(value))\n field_instance.to_internal_value(value)\n\n # Проверяем чтобы все обязательные поля в ответе были\n self.assertEqual(len(required_fields), 0)\n '''\n\n\nENDPOINTS = ApiDocumentation().get_endpoints()\n\nENDPOINTS = [ep for ep in ENDPOINTS]\n\n# Собираем список запросов.\nREQUESTS_LIST = []\nfor endpoint in ENDPOINTS:\n for method in endpoint.allowed_methods:\n serializer = get_serializer(endpoint, method)\n if serializer:\n # @TODO: Доработать тестирование без обязательных данных в запросе (without_required).\n # for request_type in ('all', 'only_required', 'without_required'):\n for request_type in ('all', 'only_required'):\n REQUESTS_LIST.append((endpoint, method, serializer, request_type))\n else:\n REQUESTS_LIST.append((endpoint, method, serializer, None))\n\nREQUESTS_DATA = {}\n# Добавляем для них тестовые методы.\nfor endpoint, method, serializer, request_type in REQUESTS_LIST:\n method_name = 'test_{}_{}_{}'.format(endpoint.callback.__name__, method, request_type)\n REQUESTS_DATA[method_name] = (endpoint, method, serializer, request_type)\n setattr(AutoTestCase, method_name, AutoTestCase.base_test_method)\n", "step-ids": [ 6, 11, 12, 14, 16 ] }
[ 6, 11, 12, 14, 16 ]
# -*- coding: utf-8 -*- import unittest import torch from pythainlp.transliterate import romanize, transliterate, pronunciate, puan from pythainlp.transliterate.ipa import trans_list, xsampa_list from pythainlp.transliterate.thai2rom import ThaiTransliterator from pythainlp.corpus import remove _BASIC_TESTS = { None: "", "": "", "abc": "abc", "หมอก": "mok", "หาย": "hai", "แมว": "maeo", "เดือน": "duean", "ดำ": "dam", "ดู": "du", "บัว": "bua", "กก": "kok", "พร": "phon", "กร": "kon", "กรร": "kan", "กรรม": "kam", # "กรม": "krom", # failed "ฝ้าย": "fai", "นพพร": "nopphon", "อัก": "ak", # "ทีปกร": "thipakon", # failed # "ธรรพ์": "than", # failed # "ธรรม": "tham", # failed # "มหา": "maha", # failed # "หยาก": "yak", # failed # "อยาก": "yak", # failed # "ยมก": "yamok", # failed # "กลัว": "klua", # failed # "บ้านไร่": "banrai", # failed # "ชารินทร์": "charin", # failed } # these are set of two-syllable words, # to test if the transliteration/romanization is consistent, say # romanize(1+2) = romanize(1) + romanize(2) _CONSISTENCY_TESTS = [ # ("กระจก", "กระ", "จก"), # failed # ("ระเบิด", "ระ", "เบิด"), # failed # ("หยากไย่", "หยาก", "ไย่"), # failed ("ตากใบ", "ตาก", "ใบ"), # ("จัดสรร", "จัด", "สรร"), # failed ] class TestTransliteratePackage(unittest.TestCase): def test_romanize(self): self.assertEqual(romanize(None), "") self.assertEqual(romanize(""), "") self.assertEqual(romanize("แมว"), "maeo") self.assertEqual(romanize("แมว", engine="tltk"), "maeo") def test_romanize_royin_basic(self): for word in _BASIC_TESTS: expect = _BASIC_TESTS[word] self.assertEqual(romanize(word, engine="royin"), expect) def test_romanize_royin_consistency(self): for word, part1, part2 in _CONSISTENCY_TESTS: self.assertEqual( romanize(word, engine="royin"), ( romanize(part1, engine="royin") + romanize(part2, engine="royin") ), ) def test_romanize_thai2rom(self): self.assertEqual(romanize("แมว", engine="thai2rom"), "maeo") self.assertEqual(romanize("บ้านไร่", engine="thai2rom"), "banrai") self.assertEqual(romanize("สุนัข", engine="thai2rom"), "sunak") self.assertEqual(romanize("นก", engine="thai2rom"), "nok") self.assertEqual(romanize("ความอิ่ม", engine="thai2rom"), "khwam-im") self.assertEqual( romanize("กานต์ ณรงค์", engine="thai2rom"), "kan narong" ) self.assertEqual(romanize("สกุนต์", engine="thai2rom"), "sakun") self.assertEqual(romanize("ชารินทร์", engine="thai2rom"), "charin") def test_thai2rom_prepare_sequence(self): transliterater = ThaiTransliterator() UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1 END_TOKEN = 3 # END_TOKEN or <end> is represented by 3 self.assertListEqual( transliterater._prepare_sequence_in("A") .cpu() .detach() .numpy() .tolist(), torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long) .cpu() .detach() .numpy() .tolist(), ) self.assertListEqual( transliterater._prepare_sequence_in("♥") .cpu() .detach() .numpy() .tolist(), torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long) .cpu() .detach() .numpy() .tolist(), ) self.assertNotEqual( transliterater._prepare_sequence_in("ก") .cpu() .detach() .numpy() .tolist(), torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long) .cpu() .detach() .numpy() .tolist(), ) def test_transliterate(self): self.assertEqual(transliterate(""), "") self.assertEqual(transliterate("แมว", "pyicu"), "mæw") self.assertEqual(transliterate("คน", engine="ipa"), "kʰon") self.assertIsNotNone(transliterate("คน", engine="thaig2p")) self.assertIsNotNone(transliterate("แมว", engine="thaig2p")) self.assertIsNotNone(transliterate("คน", engine="tltk_g2p")) 
self.assertIsNotNone(transliterate("แมว", engine="tltk_g2p")) self.assertIsNotNone(transliterate("คน", engine="tltk_ipa")) self.assertIsNotNone(transliterate("แมว", engine="tltk_ipa")) self.assertIsNotNone(trans_list("คน")) self.assertIsNotNone(xsampa_list("คน")) def test_pronunciate(self): self.assertEqual(pronunciate(""), "") remove("thai_w2p") self.assertIsNotNone(pronunciate("คน", engine="w2p")) self.assertIsNotNone(pronunciate("แมว", engine="w2p")) self.assertIsNotNone(pronunciate("มข.", engine="w2p")) self.assertIsNotNone(pronunciate("มช.", engine="w2p")) self.assertIsNotNone(pronunciate("jks", engine="w2p")) def test_puan(self): self.assertEqual(puan("นาริน"), "นิน-รา") self.assertEqual(puan("นาริน", False), "นินรา") self.assertEqual(puan("แสงดีนะ"), "แสง-ดะ-นี") self.assertEqual(puan("แสงดีนะ", False), "แสงดะนี") with self.assertRaises(ValueError): self.assertEqual(puan("สวัสดีครับ"), "สวัสดีครับ")
normal
{ "blob_id": "486cfc4bb4b46d78715b11cba44656e8ba077c9b", "index": 2551, "step-1": "<mask token>\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n <mask token>\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n <mask token>\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n <mask token>\n", "step-2": "<mask token>\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n <mask token>\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n 
dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n", "step-3": "<mask token>\n_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',\n 'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',\n 'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',\n 'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}\n_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n\n def test_romanize(self):\n self.assertEqual(romanize(None), '')\n self.assertEqual(romanize(''), '')\n self.assertEqual(romanize('แมว'), 'maeo')\n self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n 
.detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n", "step-4": "import unittest\nimport torch\nfrom pythainlp.transliterate import romanize, transliterate, pronunciate, puan\nfrom pythainlp.transliterate.ipa import trans_list, xsampa_list\nfrom pythainlp.transliterate.thai2rom import ThaiTransliterator\nfrom pythainlp.corpus import remove\n_BASIC_TESTS = {None: '', '': '', 'abc': 'abc', 'หมอก': 'mok', 'หาย': 'hai',\n 'แมว': 'maeo', 'เดือน': 'duean', 'ดำ': 'dam', 'ดู': 'du', 'บัว': 'bua',\n 'กก': 'kok', 'พร': 'phon', 'กร': 'kon', 'กรร': 'kan', 'กรรม': 'kam',\n 'ฝ้าย': 'fai', 'นพพร': 'nopphon', 'อัก': 'ak'}\n_CONSISTENCY_TESTS = [('ตากใบ', 'ตาก', 'ใบ')]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n\n def test_romanize(self):\n self.assertEqual(romanize(None), '')\n self.assertEqual(romanize(''), '')\n self.assertEqual(romanize('แมว'), 'maeo')\n self.assertEqual(romanize('แมว', engine='tltk'), 'maeo')\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine='royin'), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(romanize(word, engine='royin'), romanize(part1,\n engine='royin') + romanize(part2, engine='royin'))\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize('แมว', engine='thai2rom'), 'maeo')\n self.assertEqual(romanize('บ้านไร่', engine='thai2rom'), 'banrai')\n self.assertEqual(romanize('สุนัข', engine='thai2rom'), 'sunak')\n self.assertEqual(romanize('นก', engine='thai2rom'), 'nok')\n self.assertEqual(romanize('ความอิ่ม', engine='thai2rom'), 'khwam-im')\n self.assertEqual(romanize('กานต์ ณรงค์', engine='thai2rom'),\n 'kan narong')\n 
self.assertEqual(romanize('สกุนต์', engine='thai2rom'), 'sakun')\n self.assertEqual(romanize('ชารินทร์', engine='thai2rom'), 'charin')\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n UNK_TOKEN = 1\n END_TOKEN = 3\n self.assertListEqual(transliterater._prepare_sequence_in('A').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertListEqual(transliterater._prepare_sequence_in('♥').cpu()\n .detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n self.assertNotEqual(transliterater._prepare_sequence_in('ก').cpu().\n detach().numpy().tolist(), torch.tensor([UNK_TOKEN, END_TOKEN],\n dtype=torch.long).cpu().detach().numpy().tolist())\n\n def test_transliterate(self):\n self.assertEqual(transliterate(''), '')\n self.assertEqual(transliterate('แมว', 'pyicu'), 'mæw')\n self.assertEqual(transliterate('คน', engine='ipa'), 'kʰon')\n self.assertIsNotNone(transliterate('คน', engine='thaig2p'))\n self.assertIsNotNone(transliterate('แมว', engine='thaig2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_g2p'))\n self.assertIsNotNone(transliterate('คน', engine='tltk_ipa'))\n self.assertIsNotNone(transliterate('แมว', engine='tltk_ipa'))\n self.assertIsNotNone(trans_list('คน'))\n self.assertIsNotNone(xsampa_list('คน'))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(''), '')\n remove('thai_w2p')\n self.assertIsNotNone(pronunciate('คน', engine='w2p'))\n self.assertIsNotNone(pronunciate('แมว', engine='w2p'))\n self.assertIsNotNone(pronunciate('มข.', engine='w2p'))\n self.assertIsNotNone(pronunciate('มช.', engine='w2p'))\n self.assertIsNotNone(pronunciate('jks', engine='w2p'))\n\n def test_puan(self):\n self.assertEqual(puan('นาริน'), 'นิน-รา')\n self.assertEqual(puan('นาริน', False), 'นินรา')\n self.assertEqual(puan('แสงดีนะ'), 'แสง-ดะ-นี')\n self.assertEqual(puan('แสงดีนะ', False), 'แสงดะนี')\n with self.assertRaises(ValueError):\n self.assertEqual(puan('สวัสดีครับ'), 'สวัสดีครับ')\n", "step-5": "# -*- coding: utf-8 -*-\n\nimport unittest\n\nimport torch\nfrom pythainlp.transliterate import romanize, transliterate, pronunciate, puan\nfrom pythainlp.transliterate.ipa import trans_list, xsampa_list\nfrom pythainlp.transliterate.thai2rom import ThaiTransliterator\nfrom pythainlp.corpus import remove\n\n_BASIC_TESTS = {\n None: \"\",\n \"\": \"\",\n \"abc\": \"abc\",\n \"หมอก\": \"mok\",\n \"หาย\": \"hai\",\n \"แมว\": \"maeo\",\n \"เดือน\": \"duean\",\n \"ดำ\": \"dam\",\n \"ดู\": \"du\",\n \"บัว\": \"bua\",\n \"กก\": \"kok\",\n \"พร\": \"phon\",\n \"กร\": \"kon\",\n \"กรร\": \"kan\",\n \"กรรม\": \"kam\",\n # \"กรม\": \"krom\", # failed\n \"ฝ้าย\": \"fai\",\n \"นพพร\": \"nopphon\",\n \"อัก\": \"ak\",\n # \"ทีปกร\": \"thipakon\", # failed\n # \"ธรรพ์\": \"than\", # failed\n # \"ธรรม\": \"tham\", # failed\n # \"มหา\": \"maha\", # failed\n # \"หยาก\": \"yak\", # failed\n # \"อยาก\": \"yak\", # failed\n # \"ยมก\": \"yamok\", # failed\n # \"กลัว\": \"klua\", # failed\n # \"บ้านไร่\": \"banrai\", # failed\n # \"ชารินทร์\": \"charin\", # failed\n}\n\n# these are set of two-syllable words,\n# to test if the transliteration/romanization is consistent, say\n# romanize(1+2) = romanize(1) + romanize(2)\n_CONSISTENCY_TESTS = [\n # (\"กระจก\", \"กระ\", \"จก\"), # failed\n # (\"ระเบิด\", \"ระ\", \"เบิด\"), # failed\n # (\"หยากไย่\", \"หยาก\", \"ไย่\"), # 
failed\n (\"ตากใบ\", \"ตาก\", \"ใบ\"),\n # (\"จัดสรร\", \"จัด\", \"สรร\"), # failed\n]\n\n\nclass TestTransliteratePackage(unittest.TestCase):\n def test_romanize(self):\n self.assertEqual(romanize(None), \"\")\n self.assertEqual(romanize(\"\"), \"\")\n self.assertEqual(romanize(\"แมว\"), \"maeo\")\n self.assertEqual(romanize(\"แมว\", engine=\"tltk\"), \"maeo\")\n\n def test_romanize_royin_basic(self):\n for word in _BASIC_TESTS:\n expect = _BASIC_TESTS[word]\n self.assertEqual(romanize(word, engine=\"royin\"), expect)\n\n def test_romanize_royin_consistency(self):\n for word, part1, part2 in _CONSISTENCY_TESTS:\n self.assertEqual(\n romanize(word, engine=\"royin\"),\n (\n romanize(part1, engine=\"royin\")\n + romanize(part2, engine=\"royin\")\n ),\n )\n\n def test_romanize_thai2rom(self):\n self.assertEqual(romanize(\"แมว\", engine=\"thai2rom\"), \"maeo\")\n self.assertEqual(romanize(\"บ้านไร่\", engine=\"thai2rom\"), \"banrai\")\n self.assertEqual(romanize(\"สุนัข\", engine=\"thai2rom\"), \"sunak\")\n self.assertEqual(romanize(\"นก\", engine=\"thai2rom\"), \"nok\")\n self.assertEqual(romanize(\"ความอิ่ม\", engine=\"thai2rom\"), \"khwam-im\")\n self.assertEqual(\n romanize(\"กานต์ ณรงค์\", engine=\"thai2rom\"), \"kan narong\"\n )\n self.assertEqual(romanize(\"สกุนต์\", engine=\"thai2rom\"), \"sakun\")\n self.assertEqual(romanize(\"ชารินทร์\", engine=\"thai2rom\"), \"charin\")\n\n def test_thai2rom_prepare_sequence(self):\n transliterater = ThaiTransliterator()\n\n UNK_TOKEN = 1 # UNK_TOKEN or <UNK> is represented by 1\n END_TOKEN = 3 # END_TOKEN or <end> is represented by 3\n\n self.assertListEqual(\n transliterater._prepare_sequence_in(\"A\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n self.assertListEqual(\n transliterater._prepare_sequence_in(\"♥\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n self.assertNotEqual(\n transliterater._prepare_sequence_in(\"ก\")\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n torch.tensor([UNK_TOKEN, END_TOKEN], dtype=torch.long)\n .cpu()\n .detach()\n .numpy()\n .tolist(),\n )\n\n def test_transliterate(self):\n self.assertEqual(transliterate(\"\"), \"\")\n self.assertEqual(transliterate(\"แมว\", \"pyicu\"), \"mæw\")\n self.assertEqual(transliterate(\"คน\", engine=\"ipa\"), \"kʰon\")\n self.assertIsNotNone(transliterate(\"คน\", engine=\"thaig2p\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"thaig2p\"))\n self.assertIsNotNone(transliterate(\"คน\", engine=\"tltk_g2p\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"tltk_g2p\"))\n self.assertIsNotNone(transliterate(\"คน\", engine=\"tltk_ipa\"))\n self.assertIsNotNone(transliterate(\"แมว\", engine=\"tltk_ipa\"))\n self.assertIsNotNone(trans_list(\"คน\"))\n self.assertIsNotNone(xsampa_list(\"คน\"))\n\n def test_pronunciate(self):\n self.assertEqual(pronunciate(\"\"), \"\")\n remove(\"thai_w2p\")\n self.assertIsNotNone(pronunciate(\"คน\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"แมว\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"มข.\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"มช.\", engine=\"w2p\"))\n self.assertIsNotNone(pronunciate(\"jks\", engine=\"w2p\"))\n\n def test_puan(self):\n self.assertEqual(puan(\"นาริน\"), \"นิน-รา\")\n self.assertEqual(puan(\"นาริน\", False), \"นินรา\")\n self.assertEqual(puan(\"แสงดีนะ\"), \"แสง-ดะ-นี\")\n 
self.assertEqual(puan(\"แสงดีนะ\", False), \"แสงดะนี\")\n with self.assertRaises(ValueError):\n self.assertEqual(puan(\"สวัสดีครับ\"), \"สวัสดีครับ\")\n", "step-ids": [ 6, 8, 10, 11, 12 ] }
[ 6, 8, 10, 11, 12 ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2017/6/20 下午4:00
# @Author  : Huang HUi
# @Site    :
# @File    : query_parse.py
# @Software: PyCharm
from mysqlConnection import mysqlConnection

# Example queries:
# GIVEN_QUERY = {'days': [10, 14], 'countries': [{'country_id': 28, 'day': None}],
#                'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}, {'region_id': 69, 'day': None}], 'pois': [],
#                'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [],
#                'price': [5000, 15000], 'hotelRating': None, 'arrivalRegionId': 27, 'departRegionId': None}
# GIVEN_QUERY = {'days': [10, 13], 'countries': [{'country_id': 11, 'day': None}],
#                'regions': [{'region_id': 266, 'day': None}, {'region_id': 220, 'day': None}], 'pois': [795, 800, 878, 1536]}
# GIVEN_QUERY = {'days': [12], 'countries': [{'country_id': 28, 'day': None}],
#                'regions': [{'region_id': 2, 'day': None}, {'region_id': 70, 'day': None}],
#                'pois': [1361, 1380, 1382, 1385, 1386, 1413, 1512, 1700, 1701, 1712, 1713]}


def query_parse(GIVEN_QUERY):
    """Unpack a trip query dict and fetch matching start/end parts from MySQL.

    Missing keys simply fall back to None or an empty list; dict.get()
    replaces the original chains of bare try/except blocks.
    """
    countries = GIVEN_QUERY.get('countries')
    countryIds_query = [c['country_id'] for c in countries] if countries is not None else None
    days_query = GIVEN_QUERY.get('days')
    regions_query = GIVEN_QUERY.get('regions', [])
    regionDic_query = [{r['region_id']: r['day']} for r in regions_query] if regions_query else []
    pois_query = GIVEN_QUERY.get('pois', [])
    regionNotGo_query = GIVEN_QUERY.get('regionNotGo', [])
    poiNotGo_query = GIVEN_QUERY.get('poiNotGo', [])
    regionSorted_query = GIVEN_QUERY.get('regionSorted', [])
    availableMonths_query = GIVEN_QUERY.get('availableMonths', [])
    price_query = GIVEN_QUERY.get('price')
    hotelRating_query = GIVEN_QUERY.get('hotelRating')
    arrivalRegionId_query = GIVEN_QUERY.get('arrivalRegionId')
    departRegionId_query = GIVEN_QUERY.get('departRegionId')

    connection = mysqlConnection()
    try:
        with connection.cursor() as cursor:
            # Start parts: restricted to the requested countries (and the arrival
            # region, if given); otherwise all non-cancelled start parts.
            if countries:
                if arrivalRegionId_query:
                    sql = "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (arrivalRegionId_query, str(countryIds_query)[1:-1])
                else:
                    sql = "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]
            else:
                sql = "SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null "
            cursor.execute(sql)
            startParts = cursor.fetchall()

            # End parts: same filtering, but on is_end and the departure region.
            if countries:
                if departRegionId_query:
                    sql = "SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)" % (departRegionId_query, str(countryIds_query)[1:-1])
                else:
                    sql = "SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)" % str(countryIds_query)[1:-1]
            else:
                sql = "SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null "
            cursor.execute(sql)
            endParts = cursor.fetchall()
    finally:
        connection.close()

    startParts = [row['id'] for row in startParts]
    endParts = [row['id'] for row in endParts]

    return countryIds_query, days_query, regions_query, regionDic_query, \
        pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query, availableMonths_query, price_query, \
        hotelRating_query, arrivalRegionId_query, departRegionId_query, startParts, endParts
normal
{ "blob_id": "b52807a15cef8f07f685f8761a470d4a24d9c3dc", "index": 6603, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef query_parse(GIVEN_QUERY):\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[\n 'countries']))\n except:\n countryIds_query = None\n try:\n days_query = GIVEN_QUERY['days']\n except:\n days_query = None\n try:\n regions_query = GIVEN_QUERY['regions']\n except:\n regions_query = []\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']},\n regions_query))\n except:\n regionDic_query = []\n try:\n pois_query = GIVEN_QUERY['pois']\n except:\n pois_query = []\n try:\n regionNotGo_query = GIVEN_QUERY['regionNotGo']\n except:\n regionNotGo_query = []\n try:\n poiNotGo_query = GIVEN_QUERY['poiNotGo']\n except:\n poiNotGo_query = []\n try:\n regionSorted_query = GIVEN_QUERY['regionSorted']\n except:\n regionSorted_query = []\n try:\n availableMonths_query = GIVEN_QUERY['availableMonths']\n except:\n availableMonths_query = []\n try:\n price_query = GIVEN_QUERY['price']\n except:\n price_query = None\n try:\n hotelRating_query = GIVEN_QUERY['hotelRating']\n except:\n hotelRating_query = None\n try:\n arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId']\n except:\n arrivalRegionId_query = None\n try:\n departRegionId_query = GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query = None\n connection = mysqlConnection()\n try:\n with connection.cursor() as cursor:\n if GIVEN_QUERY['countries']:\n if arrivalRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (arrivalRegionId_query, str(countryIds_query)[1:-1])\n )\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (departRegionId_query, str(countryIds_query)[1:-1]))\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n endParts = cursor.fetchall()\n finally:\n connection.close()\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n return 
(countryIds_query, days_query, regions_query, regionDic_query,\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,\n availableMonths_query, price_query, hotelRating_query,\n arrivalRegionId_query, departRegionId_query, startParts, endParts)\n", "step-3": "from mysqlConnection import mysqlConnection\nimport yaml\nimport copy\nimport time\nimport csv\nimport json\nfrom collections import OrderedDict\nimport ast\n\n\ndef query_parse(GIVEN_QUERY):\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY[\n 'countries']))\n except:\n countryIds_query = None\n try:\n days_query = GIVEN_QUERY['days']\n except:\n days_query = None\n try:\n regions_query = GIVEN_QUERY['regions']\n except:\n regions_query = []\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']},\n regions_query))\n except:\n regionDic_query = []\n try:\n pois_query = GIVEN_QUERY['pois']\n except:\n pois_query = []\n try:\n regionNotGo_query = GIVEN_QUERY['regionNotGo']\n except:\n regionNotGo_query = []\n try:\n poiNotGo_query = GIVEN_QUERY['poiNotGo']\n except:\n poiNotGo_query = []\n try:\n regionSorted_query = GIVEN_QUERY['regionSorted']\n except:\n regionSorted_query = []\n try:\n availableMonths_query = GIVEN_QUERY['availableMonths']\n except:\n availableMonths_query = []\n try:\n price_query = GIVEN_QUERY['price']\n except:\n price_query = None\n try:\n hotelRating_query = GIVEN_QUERY['hotelRating']\n except:\n hotelRating_query = None\n try:\n arrivalRegionId_query = GIVEN_QUERY['arrivalRegionId']\n except:\n arrivalRegionId_query = None\n try:\n departRegionId_query = GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query = None\n connection = mysqlConnection()\n try:\n with connection.cursor() as cursor:\n if GIVEN_QUERY['countries']:\n if arrivalRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (arrivalRegionId_query, str(countryIds_query)[1:-1])\n )\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = (\n \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\"\n % (departRegionId_query, str(countryIds_query)[1:-1]))\n else:\n sql = (\n \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\"\n % str(countryIds_query)[1:-1])\n else:\n sql = (\n \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end 
= 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n )\n cursor.execute(sql)\n endParts = cursor.fetchall()\n finally:\n connection.close()\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n return (countryIds_query, days_query, regions_query, regionDic_query,\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query,\n availableMonths_query, price_query, hotelRating_query,\n arrivalRegionId_query, departRegionId_query, startParts, endParts)\n", "step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/6/20 下午4:00\n# @Author : Huang HUi\n# @Site : \n# @File : query_parse.py\n# @Software: PyCharm\nfrom mysqlConnection import mysqlConnection\nimport yaml\nimport copy\nimport time\nimport csv\nimport json\nfrom collections import OrderedDict\nimport ast\n\n#\n# GIVEN_QUERY = {'days': [10,14], 'countries': [{'country_id': 28, 'day': None}],\n# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 27, 'day': None}, {'region_id': 69, 'day': None}], 'pois': [],\n# 'regionNotGo': [], 'poiNotGo': [], 'regionSorted': [135, 131], 'availableMonths': [],\n# 'price': [5000, 15000], 'hotelRating': None, 'arrivalRegionId': 27, 'departRegionId': None}\n\n# GIVEN_QUERY={'days': [10,13], 'countries': [{'country_id': 11, 'day': None}], 'regions': [{'region_id': 266, 'day': None},\n# {'region_id': 220, 'day': None}], 'pois': [795, 800,878,1536]}\n\n# GIVEN_QUERY={'days': [12], 'countries': [{'country_id': 28, 'day': None}],\n# 'regions': [{'region_id': 2, 'day': None}, {'region_id': 70, 'day': None}],\n# 'pois': [1361, 1380, 1382, 1385, 1386, 1413, 1512, 1700, 1701, 1712, 1713]}\n\ndef query_parse(GIVEN_QUERY):\n\n try:\n countryIds_query = list(map(lambda x: x['country_id'], GIVEN_QUERY['countries']))\n except :\n countryIds_query=None\n try:\n days_query=GIVEN_QUERY['days']\n except :\n days_query=None\n try:\n regions_query = GIVEN_QUERY['regions']\n except :\n regions_query=[]\n try:\n regionDic_query = list(map(lambda x: {x['region_id']: x['day']}, regions_query))\n except :\n regionDic_query=[]\n try:\n pois_query=GIVEN_QUERY['pois']\n except :\n pois_query=[]\n try:\n regionNotGo_query=GIVEN_QUERY['regionNotGo']\n except :\n regionNotGo_query=[]\n try:\n poiNotGo_query=GIVEN_QUERY['poiNotGo']\n except :\n poiNotGo_query=[]\n try:\n regionSorted_query=GIVEN_QUERY['regionSorted']\n except :\n regionSorted_query=[]\n try:\n availableMonths_query=GIVEN_QUERY['availableMonths']\n except :\n availableMonths_query=[]\n try:\n price_query=GIVEN_QUERY['price']\n except :\n price_query=None\n try:\n hotelRating_query=GIVEN_QUERY['hotelRating']\n except :\n hotelRating_query=None\n try:\n arrivalRegionId_query=GIVEN_QUERY['arrivalRegionId']\n except :\n arrivalRegionId_query=None\n try:\n departRegionId_query=GIVEN_QUERY['departRegionId']\n except:\n departRegionId_query=None\n\n\n connection=mysqlConnection()\n try:\n with connection.cursor() as cursor:\n\n if GIVEN_QUERY['countries']:\n # country condition\n if arrivalRegionId_query:\n sql = \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\" % (arrivalRegionId_query,str(countryIds_query)[1:-1])\n else:\n sql = \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on 
tidy_parts.region_id = regions.id WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\" % str(countryIds_query)[1:-1]\n else:\n # all\n sql = \"SELECT id FROM tidy_parts WHERE tidy_parts.is_start = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n cursor.execute(sql)\n startParts = cursor.fetchall()\n if GIVEN_QUERY['countries']:\n if departRegionId_query:\n sql = \"SELECT tidy_parts.id as id, country_id,region_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and region_id =(%s) and country_id in (%s)\" % (departRegionId_query, str(countryIds_query)[1:-1])\n else:\n sql = \"SELECT tidy_parts.id as id, country_id FROM tidy_parts join regions on tidy_parts.region_id = regions.id WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null and country_id in (%s)\" % str(countryIds_query)[1:-1]\n else:\n sql = \"SELECT id FROM tidy_parts WHERE tidy_parts.is_end = 1 and tidy_parts.poi_ids is not NULL and tidy_parts.state!='canceled' and tidy_parts.deleted_at is null \"\n cursor.execute(sql)\n endParts = cursor.fetchall()\n\n\n\n\n finally:\n connection.close()\n\n startParts = [dict['id'] for dict in startParts]\n endParts = [dict['id'] for dict in endParts]\n\n\n return countryIds_query, days_query, regions_query, regionDic_query, \\\n pois_query, regionNotGo_query, poiNotGo_query, regionSorted_query, availableMonths_query, price_query, \\\n hotelRating_query, arrivalRegionId_query, departRegionId_query,startParts,endParts\n\n\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python

from urllib.request import urlopen

class LicenseChecker( object ):

    def __init__( self ):
        self.url = 'http://logon.guidoaccardo.com.ar/'
        self.count_offline = 15

    def __countTimes( self ):
        ff = open( 'times.ehead', 'r' )
        bb = ff.read()
        ff.close()

        return int( bb )

    def __updateTimes( self, times ):
        actual = self.__countTimes()
        ff = open( 'times.ehead', 'w' )
        ff.write( str( actual-times ) )
        ff.close()

    def isActive( self ):
        try:
            site = urlopen( self.url )
            # decode the raw bytes so the status line compares as text
            content = [ line.decode( 'utf-8' ) for line in site.readlines() ]
            site.close()
        except IOError:
            if not self.__countTimes() == 0:
                self.__updateTimes( 1 )
                return { 'active':True, 'msg':'Ejecutando sin conexion.' }
            else:
                return { 'active':False, 'msg':'Ejecutado demasiadas veces sin conexion.' }

        if content[0].strip() == 'ACTIVE':
            self.__updateTimes( self.count_offline )
            return { 'active':True, 'msg':'Iniciando Sistema' }
        else:
            return { 'active':False, 'msg':content[0].strip() }
normal
{ "blob_id": "c70aa1a373530ac73553753e62d3989f5bc79287", "index": 687, "step-1": "<mask token>\n\n\nclass LicenseChecker(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass LicenseChecker(object):\n <mask token>\n <mask token>\n\n def __updateTimes(self, times):\n actual = self.__countTimes()\n ff = open('times.ehead', 'w')\n ff.write(str(actual - times))\n ff.close()\n\n def isActive(self):\n try:\n site = urllib.urlopen(self.url)\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes(1)\n return {'active': True, 'msg': 'Ejecutando sin conexion.'}\n else:\n return {'active': False, 'msg':\n 'Ejecutado demasiadas veces sin conexion.'}\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes(self.count_offline)\n return {'active': True, 'msg': 'Iniciando Sistema'}\n else:\n return {'active': False, 'msg': content[0].strip()}\n", "step-3": "<mask token>\n\n\nclass LicenseChecker(object):\n\n def __init__(self):\n self.url = 'http://logon.guidoaccardo.com.ar/'\n self.count_offline = 15\n\n def __countTimes(self):\n ff = open('times.ehead', 'r')\n bb = ff.read()\n ff.close()\n return int(bb)\n\n def __updateTimes(self, times):\n actual = self.__countTimes()\n ff = open('times.ehead', 'w')\n ff.write(str(actual - times))\n ff.close()\n\n def isActive(self):\n try:\n site = urllib.urlopen(self.url)\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes(1)\n return {'active': True, 'msg': 'Ejecutando sin conexion.'}\n else:\n return {'active': False, 'msg':\n 'Ejecutado demasiadas veces sin conexion.'}\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes(self.count_offline)\n return {'active': True, 'msg': 'Iniciando Sistema'}\n else:\n return {'active': False, 'msg': content[0].strip()}\n", "step-4": "import urllib\n\n\nclass LicenseChecker(object):\n\n def __init__(self):\n self.url = 'http://logon.guidoaccardo.com.ar/'\n self.count_offline = 15\n\n def __countTimes(self):\n ff = open('times.ehead', 'r')\n bb = ff.read()\n ff.close()\n return int(bb)\n\n def __updateTimes(self, times):\n actual = self.__countTimes()\n ff = open('times.ehead', 'w')\n ff.write(str(actual - times))\n ff.close()\n\n def isActive(self):\n try:\n site = urllib.urlopen(self.url)\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes(1)\n return {'active': True, 'msg': 'Ejecutando sin conexion.'}\n else:\n return {'active': False, 'msg':\n 'Ejecutado demasiadas veces sin conexion.'}\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes(self.count_offline)\n return {'active': True, 'msg': 'Iniciando Sistema'}\n else:\n return {'active': False, 'msg': content[0].strip()}\n", "step-5": "#!/usr/bin/env python\n\nimport urllib\n\nclass LicenseChecker( object ):\n\n def __init__( self ):\n self.url = 'http://logon.guidoaccardo.com.ar/'\n self.count_offline = 15\n\n def __countTimes( self ):\n ff = open( 'times.ehead', 'r' )\n bb = ff.read()\n ff.close()\n\n return int( bb )\n\n def __updateTimes( self, times ):\n actual = self.__countTimes()\n ff = open( 'times.ehead', 'w' )\n ff.write( str( actual-times ) )\n ff.close()\n\n def isActive( self ):\n try:\n site = urllib.urlopen( self.url )\n content = site.readlines()\n site.close()\n except IOError:\n if not self.__countTimes() == 0:\n self.__updateTimes( 1 )\n return { 'active':True, 'msg':'Ejecutando sin conexion.' 
}\n else:\n return { 'active':False, 'msg':'Ejecutado demasiadas veces sin conexion.' }\n\n if content[0].strip() == 'ACTIVE':\n self.__updateTimes( self.count_offline )\n return { 'active':True, 'msg':'Iniciando Sistema' }\n else:\n return { 'active':False, 'msg':content[0].strip() }\n", "step-ids": [ 1, 3, 5, 6, 7 ] }
[ 1, 3, 5, 6, 7 ]
from dbmanager import DbManager from message import Message def list_returner(f): def wrapper(*args, **kwargs): result = f(*args, **kwargs) if result: return result else: return [dict()] return wrapper class Messenger: def __init__(self, messages_count=20): self.messages_count = messages_count self.message_manager = DbManager() def add_message(self, message): self.message_manager.add(message) @list_returner def get_room_messages(self): messages = [] i = 6 found_messages = [] for message in self.message_manager.find({}, self.messages_count): found_messages.append(Message(**message)) for message in sorted(found_messages): message_dict = message.as_dict() message_dict['id'] = i messages.append(message_dict) i += 1 return messages def read_messages(self): output = list() messages = self.get_room_messages() output.append('[' + ', '.join([str(message).replace("'", '"') for message in messages]) + ']') return output def delete_message(self, message): try: self.message_manager.delete(message) except: print('No message') def update_message(self, old_message, message): self.message_manager.update(old_message, message)
normal
{ "blob_id": "4d1ea6522a01603f0159a1f27da70b65c4f387cb", "index": 7093, "step-1": "<mask token>\n\n\nclass Messenger:\n <mask token>\n\n def add_message(self, message):\n self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n <mask token>\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n", "step-2": "<mask token>\n\n\nclass Messenger:\n\n def __init__(self, messages_count=20):\n self.messages_count = messages_count\n self.message_manager = DbManager()\n\n def add_message(self, message):\n self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n\n def delete_message(self, message):\n try:\n self.message_manager.delete(message)\n except:\n print('No message')\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n", "step-3": "<mask token>\n\n\ndef list_returner(f):\n\n def wrapper(*args, **kwargs):\n result = f(*args, **kwargs)\n if result:\n return result\n else:\n return [dict()]\n return wrapper\n\n\nclass Messenger:\n\n def __init__(self, messages_count=20):\n self.messages_count = messages_count\n self.message_manager = DbManager()\n\n def add_message(self, message):\n self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n\n def delete_message(self, message):\n try:\n self.message_manager.delete(message)\n except:\n print('No message')\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n", "step-4": "from dbmanager import DbManager\nfrom message import Message\n\n\ndef list_returner(f):\n\n def wrapper(*args, **kwargs):\n result = f(*args, **kwargs)\n if result:\n return result\n else:\n return [dict()]\n return wrapper\n\n\nclass Messenger:\n\n def __init__(self, messages_count=20):\n self.messages_count = messages_count\n self.message_manager = DbManager()\n\n def add_message(self, message):\n 
self.message_manager.add(message)\n\n @list_returner\n def get_room_messages(self):\n messages = []\n i = 6\n found_messages = []\n for message in self.message_manager.find({}, self.messages_count):\n found_messages.append(Message(**message))\n for message in sorted(found_messages):\n message_dict = message.as_dict()\n message_dict['id'] = i\n messages.append(message_dict)\n i += 1\n return messages\n\n def read_messages(self):\n output = list()\n messages = self.get_room_messages()\n output.append('[' + ', '.join([str(message).replace(\"'\", '\"') for\n message in messages]) + ']')\n return output\n\n def delete_message(self, message):\n try:\n self.message_manager.delete(message)\n except:\n print('No message')\n\n def update_message(self, old_message, message):\n self.message_manager.update(old_message, message)\n", "step-5": null, "step-ids": [ 5, 7, 8, 9 ] }
[ 5, 7, 8, 9 ]
import unittest from game_of_life.board import Board from game_of_life.cell import Cell, ALIVE, DEAD def create_test_board(size): board = Board(size) board[0, 0].state = ALIVE board[0, 1].state = ALIVE board[2, 1].state = ALIVE return board class BoardTests(unittest.TestCase): def test_get_neighbours(self): board = create_test_board(3) self.assertListEqual(board.get_neighbour_states(1, 0), [ None, None, ALIVE, ALIVE, DEAD, ALIVE, DEAD, None ]) def test_simple_update(self): alive_cells = [(0, 0), (1, 1), (0, 1)] board = Board(3) board.set_alive_cells(alive_cells) board.update() states = board.list_of_values self.assertListEqual(states, [ [1, 1, 0], [1, 1, 0], [0, 0, 0] ]) def test_simple_update2(self): init_config = [(0, 0), (0, 1), (0, 2)] board = Board(3) board.set_alive_cells(init_config) board.update() states = board.list_of_values self.assertListEqual(states, [ [0, 1, 0], [0, 1, 0], [0, 0, 0] ]) def test_overpopulation(self): init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)] board = Board(3) board.set_alive_cells(init_config) board.update() states = board.list_of_values self.assertListEqual(states, [ [1, 1, 1], [1, 0, 1], [1, 1, 1] ]) class CellTest(unittest.TestCase): def test_is_alive(self): alive_cell = Cell(ALIVE) self.assertTrue(alive_cell.is_alive) dead_cell = Cell(DEAD) self.assertFalse(dead_cell.is_alive) def test_create_life(self): cell = Cell(DEAD) neighbours = [1, 1, 1, 0, 0, 0, 0, None, None] self.assertTrue(cell.will_survive(neighbours)) def test_will_not_be_born(self): cell = Cell(DEAD) neighbours = [1, 1, 0, 0, 0, 0, 0,] self.assertFalse(cell.will_survive(neighbours)) neighbours = [1, 1, 1, 1, 0, 0, 0] self.assertFalse(cell.will_survive(neighbours)) def test_stay_alive(self): cell = Cell(ALIVE) neighbours2 = [1, 1, 0, 0, 0, 0, None, None] self.assertTrue(cell.will_survive(neighbours2)) neighbours3 = [1, 1, 1, 0, 0, 0, None, None] self.assertTrue(cell.will_survive(neighbours3)) def test_will_not_survive_overpopulation(self): cell = Cell(ALIVE) neighbours = [1, 1, 1, 1, 0, 0, 0, 0] self.assertFalse(cell.will_survive(neighbours)) def test_will_not_survive_underpopulation(self): cell = Cell(ALIVE) neighbours = [1, 0, 0, 0, 0, 0, 0] self.assertFalse(cell.will_survive(neighbours)) if __name__ == '__main__': unittest.main()
normal
{ "blob_id": "f644ff322d1268092dbdcbfc1a3c76006424184b", "index": 1482, "step-1": "<mask token>\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n <mask token>\n <mask token>\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])\n <mask token>\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def 
test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef create_test_board(size):\n board = Board(size)\n board[0, 0].state = ALIVE\n board[0, 1].state = ALIVE\n board[2, 1].state = ALIVE\n return board\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])\n\n def test_overpopulation(self):\n init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "import unittest\nfrom game_of_life.board import Board\nfrom game_of_life.cell import Cell, ALIVE, DEAD\n\n\ndef create_test_board(size):\n board = Board(size)\n board[0, 0].state = ALIVE\n board[0, 1].state = ALIVE\n board[2, 1].state = ALIVE\n return board\n\n\nclass BoardTests(unittest.TestCase):\n\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [None, None,\n ALIVE, ALIVE, DEAD, ALIVE, DEAD, None])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 0], [1, 1, 0], [0, 0, 0]])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[0, 1, 0], [0, 1, 0], [0, 0, 0]])\n\n def test_overpopulation(self):\n 
init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [[1, 1, 1], [1, 0, 1], [1, 1, 1]])\n\n\nclass CellTest(unittest.TestCase):\n\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "import unittest\nfrom game_of_life.board import Board\nfrom game_of_life.cell import Cell, ALIVE, DEAD\n\n\ndef create_test_board(size):\n board = Board(size)\n board[0, 0].state = ALIVE\n board[0, 1].state = ALIVE\n board[2, 1].state = ALIVE\n return board\n\n\nclass BoardTests(unittest.TestCase):\n def test_get_neighbours(self):\n board = create_test_board(3)\n self.assertListEqual(board.get_neighbour_states(1, 0), [\n None, None, ALIVE,\n ALIVE, DEAD,\n ALIVE, DEAD, None\n ])\n\n def test_simple_update(self):\n alive_cells = [(0, 0), (1, 1), (0, 1)]\n board = Board(3)\n board.set_alive_cells(alive_cells)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [\n [1, 1, 0],\n [1, 1, 0],\n [0, 0, 0]\n ])\n\n def test_simple_update2(self):\n init_config = [(0, 0), (0, 1), (0, 2)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [\n [0, 1, 0],\n [0, 1, 0],\n [0, 0, 0]\n ])\n\n def test_overpopulation(self):\n init_config = [(0, 1), (1, 0), (1, 1), (1, 2), (2, 1)]\n board = Board(3)\n board.set_alive_cells(init_config)\n board.update()\n states = board.list_of_values\n self.assertListEqual(states, [\n [1, 1, 1],\n [1, 0, 1],\n [1, 1, 1]\n ])\n\n\nclass CellTest(unittest.TestCase):\n def test_is_alive(self):\n alive_cell = Cell(ALIVE)\n self.assertTrue(alive_cell.is_alive)\n\n dead_cell = Cell(DEAD)\n self.assertFalse(dead_cell.is_alive)\n\n def test_create_life(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours))\n\n def test_will_not_be_born(self):\n cell = Cell(DEAD)\n neighbours = [1, 1, 0, 0, 0, 0, 0,]\n self.assertFalse(cell.will_survive(neighbours))\n neighbours = [1, 1, 1, 1, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_stay_alive(self):\n cell = Cell(ALIVE)\n neighbours2 = [1, 1, 0, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours2))\n neighbours3 = [1, 1, 1, 0, 0, 0, None, None]\n self.assertTrue(cell.will_survive(neighbours3))\n\n def 
test_will_not_survive_overpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 1, 1, 1, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n def test_will_not_survive_underpopulation(self):\n cell = Cell(ALIVE)\n neighbours = [1, 0, 0, 0, 0, 0, 0]\n self.assertFalse(cell.will_survive(neighbours))\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-ids": [ 10, 11, 14, 15, 16 ] }
[ 10, 11, 14, 15, 16 ]
#Kivy + Box2d test #Not working... from Box2D import * from random import random from kivy.app import App from kivy.uix.widget import Widget from kivy.properties import NumericProperty, ObjectProperty from kivy.lang import Builder from kivy.clock import Clock Builder.load_string(''' <PongBall>: canvas: Color: hsv: self.hue, 1, 1 Ellipse: pos: self.x - self.radius, self.y - self.radius size: self.radius * 2, self.radius * 2 <PongPaddle>: size: 25, 200 canvas: Rectangle: pos:self.pos size:self.size ''') class PongPaddle(Widget): score = NumericProperty(0) def __init__(self, **kwargs): super(PongPaddle,self).__init__(**kwargs) class PongBall(Widget): radius = NumericProperty(20) hue = NumericProperty(0) # for physics world = ObjectProperty(None) _body = ObjectProperty(None) speed = 100 def __init__(self, **kwargs): super(PongBall, self).__init__(**kwargs) self._body = body = self.world.CreateDynamicBody( position = self.pos, linearDamping=0 ) fix = body.CreateCircleFixture( radius = self.radius, density = 0, restitution = 1, friction=0 ) self.hue = random() def update_from_body(self): #constant speed vel = self._body.linearVelocity if(vel.length > 0 and (vel.length > 1.05 or vel.length < 0.95)): t = self.speed/vel.length vel.x = vel.x*t vel.y = vel.y*t self._body.linearVelocity = vel self.pos = self._body.position.x, self._body.position.y class PongGame(App): ball = ObjectProperty(None) player1 = ObjectProperty(None) player2 = ObjectProperty(None) def touchdown(self, instance, touch): self.serve_ball() def serve_ball(self): vel = self.ball._body.linearVelocity vel.x = random()-0.5 vel.y = random()-0.5 self.ball._body.linearVelocity = vel #self.ball._body.SetPosition(b2Vec2(200,200)) self.ball._body.SetTransform(b2Vec2(200,200),0) #self.ball._body.position.x = 200 #self.ball._body.position.y = 200 def build(self): canvas = Widget() canvas.bind(on_touch_down=self.touchdown) self.world = world = b2World((0,-10), True) edges = self.world.CreateStaticBody( shapes=b2EdgeShape(vertices=[(-4000,0),(0,4000)]) ) edges.position.Set(0,0) self.ball = ball = PongBall(y=200,x=200,world=world) canvas.add_widget(ball) self.serve_ball() Clock.schedule_interval(self.update, 1/60) return canvas def update(self, dt): self.world.Step(dt, 10, 8) self.ball.update_from_body() PongGame().run()
normal
{ "blob_id": "fa8431ae96cd6c1133d56285d0168f43d9068bc5", "index": 2099, "step-1": "<mask token>\n\n\nclass PongBall(Widget):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, **kwargs):\n super(PongBall, self).__init__(**kwargs)\n self._body = body = self.world.CreateDynamicBody(position=self.pos,\n linearDamping=0)\n fix = body.CreateCircleFixture(radius=self.radius, density=0,\n restitution=1, friction=0)\n self.hue = random()\n <mask token>\n\n\nclass PongGame(App):\n ball = ObjectProperty(None)\n player1 = ObjectProperty(None)\n player2 = ObjectProperty(None)\n\n def touchdown(self, instance, touch):\n self.serve_ball()\n\n def serve_ball(self):\n vel = self.ball._body.linearVelocity\n vel.x = random() - 0.5\n vel.y = random() - 0.5\n self.ball._body.linearVelocity = vel\n self.ball._body.SetTransform(b2Vec2(200, 200), 0)\n\n def build(self):\n canvas = Widget()\n canvas.bind(on_touch_down=self.touchdown)\n self.world = world = b2World((0, -10), True)\n edges = self.world.CreateStaticBody(shapes=b2EdgeShape(vertices=[(-\n 4000, 0), (0, 4000)]))\n edges.position.Set(0, 0)\n self.ball = ball = PongBall(y=200, x=200, world=world)\n canvas.add_widget(ball)\n self.serve_ball()\n Clock.schedule_interval(self.update, 1 / 60)\n return canvas\n\n def update(self, dt):\n self.world.Step(dt, 10, 8)\n self.ball.update_from_body()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass PongPaddle(Widget):\n <mask token>\n\n def __init__(self, **kwargs):\n super(PongPaddle, self).__init__(**kwargs)\n\n\nclass PongBall(Widget):\n radius = NumericProperty(20)\n hue = NumericProperty(0)\n world = ObjectProperty(None)\n _body = ObjectProperty(None)\n speed = 100\n\n def __init__(self, **kwargs):\n super(PongBall, self).__init__(**kwargs)\n self._body = body = self.world.CreateDynamicBody(position=self.pos,\n linearDamping=0)\n fix = body.CreateCircleFixture(radius=self.radius, density=0,\n restitution=1, friction=0)\n self.hue = random()\n\n def update_from_body(self):\n vel = self._body.linearVelocity\n if vel.length > 0 and (vel.length > 1.05 or vel.length < 0.95):\n t = self.speed / vel.length\n vel.x = vel.x * t\n vel.y = vel.y * t\n self._body.linearVelocity = vel\n self.pos = self._body.position.x, self._body.position.y\n\n\nclass PongGame(App):\n ball = ObjectProperty(None)\n player1 = ObjectProperty(None)\n player2 = ObjectProperty(None)\n\n def touchdown(self, instance, touch):\n self.serve_ball()\n\n def serve_ball(self):\n vel = self.ball._body.linearVelocity\n vel.x = random() - 0.5\n vel.y = random() - 0.5\n self.ball._body.linearVelocity = vel\n self.ball._body.SetTransform(b2Vec2(200, 200), 0)\n\n def build(self):\n canvas = Widget()\n canvas.bind(on_touch_down=self.touchdown)\n self.world = world = b2World((0, -10), True)\n edges = self.world.CreateStaticBody(shapes=b2EdgeShape(vertices=[(-\n 4000, 0), (0, 4000)]))\n edges.position.Set(0, 0)\n self.ball = ball = PongBall(y=200, x=200, world=world)\n canvas.add_widget(ball)\n self.serve_ball()\n Clock.schedule_interval(self.update, 1 / 60)\n return canvas\n\n def update(self, dt):\n self.world.Step(dt, 10, 8)\n self.ball.update_from_body()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass PongPaddle(Widget):\n score = NumericProperty(0)\n\n def __init__(self, **kwargs):\n super(PongPaddle, self).__init__(**kwargs)\n\n\nclass PongBall(Widget):\n radius = NumericProperty(20)\n hue = NumericProperty(0)\n world = ObjectProperty(None)\n _body = ObjectProperty(None)\n speed = 100\n\n def 
__init__(self, **kwargs):\n super(PongBall, self).__init__(**kwargs)\n self._body = body = self.world.CreateDynamicBody(position=self.pos,\n linearDamping=0)\n fix = body.CreateCircleFixture(radius=self.radius, density=0,\n restitution=1, friction=0)\n self.hue = random()\n\n def update_from_body(self):\n vel = self._body.linearVelocity\n if vel.length > 0 and (vel.length > 1.05 or vel.length < 0.95):\n t = self.speed / vel.length\n vel.x = vel.x * t\n vel.y = vel.y * t\n self._body.linearVelocity = vel\n self.pos = self._body.position.x, self._body.position.y\n\n\nclass PongGame(App):\n ball = ObjectProperty(None)\n player1 = ObjectProperty(None)\n player2 = ObjectProperty(None)\n\n def touchdown(self, instance, touch):\n self.serve_ball()\n\n def serve_ball(self):\n vel = self.ball._body.linearVelocity\n vel.x = random() - 0.5\n vel.y = random() - 0.5\n self.ball._body.linearVelocity = vel\n self.ball._body.SetTransform(b2Vec2(200, 200), 0)\n\n def build(self):\n canvas = Widget()\n canvas.bind(on_touch_down=self.touchdown)\n self.world = world = b2World((0, -10), True)\n edges = self.world.CreateStaticBody(shapes=b2EdgeShape(vertices=[(-\n 4000, 0), (0, 4000)]))\n edges.position.Set(0, 0)\n self.ball = ball = PongBall(y=200, x=200, world=world)\n canvas.add_widget(ball)\n self.serve_ball()\n Clock.schedule_interval(self.update, 1 / 60)\n return canvas\n\n def update(self, dt):\n self.world.Step(dt, 10, 8)\n self.ball.update_from_body()\n\n\n<mask token>\n", "step-4": "from Box2D import *\nfrom random import random\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import NumericProperty, ObjectProperty\nfrom kivy.lang import Builder\nfrom kivy.clock import Clock\nBuilder.load_string(\n \"\"\"\n<PongBall>:\n canvas:\n Color:\n hsv: self.hue, 1, 1\n Ellipse:\n pos: self.x - self.radius, self.y - self.radius\n size: self.radius * 2, self.radius * 2\n\n<PongPaddle>:\n size: 25, 200\n canvas:\n Rectangle:\n pos:self.pos\n size:self.size\n\"\"\"\n )\n\n\nclass PongPaddle(Widget):\n score = NumericProperty(0)\n\n def __init__(self, **kwargs):\n super(PongPaddle, self).__init__(**kwargs)\n\n\nclass PongBall(Widget):\n radius = NumericProperty(20)\n hue = NumericProperty(0)\n world = ObjectProperty(None)\n _body = ObjectProperty(None)\n speed = 100\n\n def __init__(self, **kwargs):\n super(PongBall, self).__init__(**kwargs)\n self._body = body = self.world.CreateDynamicBody(position=self.pos,\n linearDamping=0)\n fix = body.CreateCircleFixture(radius=self.radius, density=0,\n restitution=1, friction=0)\n self.hue = random()\n\n def update_from_body(self):\n vel = self._body.linearVelocity\n if vel.length > 0 and (vel.length > 1.05 or vel.length < 0.95):\n t = self.speed / vel.length\n vel.x = vel.x * t\n vel.y = vel.y * t\n self._body.linearVelocity = vel\n self.pos = self._body.position.x, self._body.position.y\n\n\nclass PongGame(App):\n ball = ObjectProperty(None)\n player1 = ObjectProperty(None)\n player2 = ObjectProperty(None)\n\n def touchdown(self, instance, touch):\n self.serve_ball()\n\n def serve_ball(self):\n vel = self.ball._body.linearVelocity\n vel.x = random() - 0.5\n vel.y = random() - 0.5\n self.ball._body.linearVelocity = vel\n self.ball._body.SetTransform(b2Vec2(200, 200), 0)\n\n def build(self):\n canvas = Widget()\n canvas.bind(on_touch_down=self.touchdown)\n self.world = world = b2World((0, -10), True)\n edges = self.world.CreateStaticBody(shapes=b2EdgeShape(vertices=[(-\n 4000, 0), (0, 4000)]))\n edges.position.Set(0, 0)\n self.ball = ball 
= PongBall(y=200, x=200, world=world)\n canvas.add_widget(ball)\n self.serve_ball()\n Clock.schedule_interval(self.update, 1 / 60)\n return canvas\n\n def update(self, dt):\n self.world.Step(dt, 10, 8)\n self.ball.update_from_body()\n\n\nPongGame().run()\n", "step-5": "#Kivy + Box2d test\n#Not working...\n\nfrom Box2D import *\n\nfrom random import random\nfrom kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import NumericProperty, ObjectProperty\nfrom kivy.lang import Builder\nfrom kivy.clock import Clock\n\nBuilder.load_string('''\n<PongBall>:\n canvas:\n Color:\n hsv: self.hue, 1, 1\n Ellipse:\n pos: self.x - self.radius, self.y - self.radius\n size: self.radius * 2, self.radius * 2\n\n<PongPaddle>:\n size: 25, 200\n canvas:\n Rectangle:\n pos:self.pos\n size:self.size\n''')\n\n\nclass PongPaddle(Widget):\n\tscore = NumericProperty(0)\n\n\tdef __init__(self, **kwargs):\n\t\tsuper(PongPaddle,self).__init__(**kwargs)\n\n\n\nclass PongBall(Widget):\n\tradius = NumericProperty(20)\n\thue = NumericProperty(0)\n\n\t# for physics\n\tworld = ObjectProperty(None)\n\t_body = ObjectProperty(None)\n\tspeed = 100\n\tdef __init__(self, **kwargs):\n\t\tsuper(PongBall, self).__init__(**kwargs)\n\t\t\n\t\tself._body = body = self.world.CreateDynamicBody(\n\t\t\tposition = self.pos,\n\t\t\tlinearDamping=0\n\t\t\t)\n\t\tfix = body.CreateCircleFixture(\n\t\t\tradius = self.radius,\n\t\t\tdensity = 0,\n\t\t\trestitution = 1,\n\t\t\tfriction=0\n\t\t\t)\n\t\tself.hue = random()\n\n\tdef update_from_body(self):\n\t\t#constant speed\n\t\tvel = self._body.linearVelocity\n\t\tif(vel.length > 0 and (vel.length > 1.05 or vel.length < 0.95)):\n\t\t\tt = self.speed/vel.length\n\t\t\tvel.x = vel.x*t\n\t\t\tvel.y = vel.y*t\n\t\t\tself._body.linearVelocity = vel\n\t\tself.pos = self._body.position.x, self._body.position.y\n\n\nclass PongGame(App):\n\tball = ObjectProperty(None)\n\tplayer1 = ObjectProperty(None)\n\tplayer2 = ObjectProperty(None)\n\n\tdef touchdown(self, instance, touch):\n\t\tself.serve_ball()\n\n\tdef serve_ball(self):\n\t\tvel = self.ball._body.linearVelocity\n\t\tvel.x = random()-0.5\n\t\tvel.y = random()-0.5\n\t\tself.ball._body.linearVelocity = vel\n\n\t\t#self.ball._body.SetPosition(b2Vec2(200,200))\n\t\tself.ball._body.SetTransform(b2Vec2(200,200),0)\n\t\t#self.ball._body.position.x = 200\n\t\t#self.ball._body.position.y = 200\n\n\tdef build(self):\n\t\tcanvas = Widget()\n\t\tcanvas.bind(on_touch_down=self.touchdown)\n\t\tself.world = world = b2World((0,-10), True)\n\n\t\tedges = self.world.CreateStaticBody(\n\t\t\tshapes=b2EdgeShape(vertices=[(-4000,0),(0,4000)]) \n\t\t\t)\n\t\tedges.position.Set(0,0)\n\n\t\tself.ball = ball = PongBall(y=200,x=200,world=world)\n\t\tcanvas.add_widget(ball)\n\n\t\tself.serve_ball()\n\n\t\tClock.schedule_interval(self.update, 1/60)\n\t\treturn canvas\n\n\tdef update(self, dt):\n\t\tself.world.Step(dt, 10, 8)\n\t\tself.ball.update_from_body()\n\t\t\n\nPongGame().run()", "step-ids": [ 8, 12, 13, 15, 16 ] }
[ 8, 12, 13, 15, 16 ]
import sys lines = sys.stdin.readlines() t = int(lines[0]) for i in range(t): c = i*10+1 n = int(lines[c]) - 1 first = [x.strip() for x in [ lines[c+1], lines[c+2], lines[c+3], lines[c+4]]] first = [s.split() for s in first] m = int(lines[c+5]) - 1 second = [x.strip() for x in [ lines[c+6], lines[c+7], lines[c+8], lines[c+9]]] second = [s.split() for s in second] results = [a for a in first[n] if a in second[m] and a is not ' '] if len(results) == 1: print 'Case #{nr}: {number}'.format(nr=(i+1), number=results[0]) if len(results) > 1: print 'Case #{nr}: Bad magician!'.format(nr=(i+1)) if len(results) == 0: print 'Case #{nr}: Volunteer cheated!'.format(nr=(i+1))
normal
{ "blob_id": "d6bc8afcdb7636085b01add860f808024fbe566d", "index": 2428, "step-1": "import sys\n\nlines = sys.stdin.readlines()\n\nt = int(lines[0])\n\nfor i in range(t):\n c = i*10+1\n n = int(lines[c]) - 1\n first = [x.strip() for x in [\n lines[c+1],\n lines[c+2],\n lines[c+3],\n lines[c+4]]]\n first = [s.split() for s in first]\n m = int(lines[c+5]) - 1\n second = [x.strip() for x in [\n lines[c+6],\n lines[c+7],\n lines[c+8],\n lines[c+9]]]\n second = [s.split() for s in second]\n results = [a for a in first[n] if a in second[m] and a is not ' ']\n if len(results) == 1:\n print 'Case #{nr}: {number}'.format(nr=(i+1), number=results[0])\n if len(results) > 1:\n print 'Case #{nr}: Bad magician!'.format(nr=(i+1))\n if len(results) == 0:\n print 'Case #{nr}: Volunteer cheated!'.format(nr=(i+1))\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from rest_framework import serializers from . import models class RaumSerializer(serializers.ModelSerializer): class Meta: model = models.Raum fields = [ "Raumnummer", "Anzahl_Sitzplaetze", "Beamer", "Whiteboard", ] class ZeitraumSerializer(serializers.ModelSerializer): class Meta: model = models.Zeitraum fields = [ "Vorlesungszeit", "EndTime", "Datum", "StartTime", ] class RaumbelegungSerializer(serializers.ModelSerializer): class Meta: model = models.Raumbelegung fields = [ "Belegt", "Belegungsgrund", ]
normal
{ "blob_id": "451c353a949458f5f71783c4aba1888c40018bfa", "index": 9400, "step-1": "<mask token>\n\n\nclass RaumbelegungSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Raumbelegung\n fields = ['Belegt', 'Belegungsgrund']\n", "step-2": "<mask token>\n\n\nclass ZeitraumSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Zeitraum\n fields = ['Vorlesungszeit', 'EndTime', 'Datum', 'StartTime']\n\n\nclass RaumbelegungSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Raumbelegung\n fields = ['Belegt', 'Belegungsgrund']\n", "step-3": "<mask token>\n\n\nclass RaumSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Raum\n fields = ['Raumnummer', 'Anzahl_Sitzplaetze', 'Beamer', 'Whiteboard']\n\n\nclass ZeitraumSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Zeitraum\n fields = ['Vorlesungszeit', 'EndTime', 'Datum', 'StartTime']\n\n\nclass RaumbelegungSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Raumbelegung\n fields = ['Belegt', 'Belegungsgrund']\n", "step-4": "from rest_framework import serializers\nfrom . import models\n\n\nclass RaumSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Raum\n fields = ['Raumnummer', 'Anzahl_Sitzplaetze', 'Beamer', 'Whiteboard']\n\n\nclass ZeitraumSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Zeitraum\n fields = ['Vorlesungszeit', 'EndTime', 'Datum', 'StartTime']\n\n\nclass RaumbelegungSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = models.Raumbelegung\n fields = ['Belegt', 'Belegungsgrund']\n", "step-5": "from rest_framework import serializers\n\nfrom . import models\n\n\nclass RaumSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Raum\n fields = [\n \"Raumnummer\",\n \"Anzahl_Sitzplaetze\",\n \"Beamer\",\n \"Whiteboard\",\n ]\n\nclass ZeitraumSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Zeitraum\n fields = [\n \"Vorlesungszeit\",\n \"EndTime\",\n \"Datum\",\n \"StartTime\",\n ]\n\nclass RaumbelegungSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = models.Raumbelegung\n fields = [\n \"Belegt\",\n \"Belegungsgrund\",\n ]\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import xl2dict myxlobject= XlToDict() myxlobject.convert_sheet_to_dict(file_path="Soul Breaks.xlsx", sheet="First Sheet", filter_variables_dict={"User Type" : "Admin", "Environment" : "Dev"})
normal
{ "blob_id": "8ec981bf8746e09d3865bc20dcfbf2fbd797c145", "index": 7511, "step-1": "<mask token>\n", "step-2": "<mask token>\nmyxlobject.convert_sheet_to_dict(file_path='Soul Breaks.xlsx', sheet=\n 'First Sheet', filter_variables_dict={'User Type': 'Admin',\n 'Environment': 'Dev'})\n", "step-3": "<mask token>\nmyxlobject = XlToDict()\nmyxlobject.convert_sheet_to_dict(file_path='Soul Breaks.xlsx', sheet=\n 'First Sheet', filter_variables_dict={'User Type': 'Admin',\n 'Environment': 'Dev'})\n", "step-4": "import xl2dict\nmyxlobject = XlToDict()\nmyxlobject.convert_sheet_to_dict(file_path='Soul Breaks.xlsx', sheet=\n 'First Sheet', filter_variables_dict={'User Type': 'Admin',\n 'Environment': 'Dev'})\n", "step-5": "import xl2dict\n\nmyxlobject= XlToDict()\nmyxlobject.convert_sheet_to_dict(file_path=\"Soul Breaks.xlsx\", sheet=\"First Sheet\",\n filter_variables_dict={\"User Type\" : \"Admin\", \"Environment\" : \"Dev\"})", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# # -*- coding: utf-8 -*- # # """ # Py40 PyQt5 tutorial # # This example shows three labels on a window # using absolute positioning. # # author: Jan Bodnar # website: py40.com # last edited: January 2015 # """ # # import sys # from PyQt5.QtWidgets import QWidget, QLabel, QApplication # # # class Example(QWidget): # # def __init__(self): # super().__init__() # # self.initUI() # # def initUI(self): # lbl1 = QLabel('Zetcode', self) # lbl1.move(15, 10) # # lbl2 = QLabel('tutorials', self) # lbl2.move(35, 40) # # lbl3 = QLabel('for programmers', self) # lbl3.move(55, 70) # # self.setGeometry(300, 300, 250, 150) # self.setWindowTitle('Absolute') # self.show() # # # if __name__ == '__main__': # app = QApplication(sys.argv) # ex = Example() # sys.exit(app.exec_()) import psycopg2 def absent(lectureid,sectionid): connection = psycopg2.connect(database="profmate", user="python", password="python", host="34.74.217.167", port="5432") cursor = connection.cursor() postgreSQL_select_Query ="select * from lec_%s \ where student_id not in (select base.studentid\ from (select S.SectionID,Lectures.Lecture_Name,P.StudentID\ from Sections As S\ Join POOL as P\ On (P.Time > S.Time_Start)\ and (P.Time < S.Time_End)\ Join Lectures\ ON S.LectureID = Lectures.Lecture_ID\ Order By SectionID) as base\ join Students \ ON base.studentid = Students.Student_ID\ where sectionid = '%s' );" cursor.execute(postgreSQL_select_Query,(lectureid,sectionid)) print("Selecting rows from POOL table using cursor.fetchall") current_table = cursor.fetchall() print("Print each row and it's columns values") longstring = str('') for row in current_table: # print("Student ID = ", row[0]) # print("Family Name = ", row[1]) # print("Given Name = ", row[2], "\n") longstring = "".join((longstring, "Student ID = ",str(row[0]),"\n")) longstring = "".join((longstring, "Family Name = ", row[1], "\n")) longstring = "".join((longstring, "Given Name = ", row[2], "\n")) cursor.close() connection.close() print("PostgreSQL connection is closed") return longstring if __name__ == '__main__': a = '234567890' b = 'Tester' c = 'One' # insert_students(a, b, c) print(absent(101, 1001))
normal
{ "blob_id": "e05dac901228e6972c1cb48ce2def3d248b4c167", "index": 3053, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef absent(lectureid, sectionid):\n connection = psycopg2.connect(database='profmate', user='python',\n password='python', host='34.74.217.167', port='5432')\n cursor = connection.cursor()\n postgreSQL_select_Query = (\n \"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );\"\n )\n cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))\n print('Selecting rows from POOL table using cursor.fetchall')\n current_table = cursor.fetchall()\n print(\"Print each row and it's columns values\")\n longstring = str('')\n for row in current_table:\n longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\\n'))\n longstring = ''.join((longstring, 'Family Name = ', row[1], '\\n'))\n longstring = ''.join((longstring, 'Given Name = ', row[2], '\\n'))\n cursor.close()\n connection.close()\n print('PostgreSQL connection is closed')\n return longstring\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef absent(lectureid, sectionid):\n connection = psycopg2.connect(database='profmate', user='python',\n password='python', host='34.74.217.167', port='5432')\n cursor = connection.cursor()\n postgreSQL_select_Query = (\n \"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );\"\n )\n cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))\n print('Selecting rows from POOL table using cursor.fetchall')\n current_table = cursor.fetchall()\n print(\"Print each row and it's columns values\")\n longstring = str('')\n for row in current_table:\n longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\\n'))\n longstring = ''.join((longstring, 'Family Name = ', row[1], '\\n'))\n longstring = ''.join((longstring, 'Given Name = ', row[2], '\\n'))\n cursor.close()\n connection.close()\n print('PostgreSQL connection is closed')\n return longstring\n\n\nif __name__ == '__main__':\n a = '234567890'\n b = 'Tester'\n c = 'One'\n print(absent(101, 1001))\n", "step-4": "import psycopg2\n\n\ndef absent(lectureid, sectionid):\n connection = psycopg2.connect(database='profmate', user='python',\n password='python', host='34.74.217.167', port='5432')\n cursor = connection.cursor()\n postgreSQL_select_Query = (\n \"select * from lec_%s where student_id not in (select base.studentid from (select S.SectionID,Lectures.Lecture_Name,P.StudentID from Sections As S Join POOL as P On (P.Time > S.Time_Start) and (P.Time < S.Time_End) Join Lectures ON S.LectureID = Lectures.Lecture_ID Order By SectionID) as base join Students ON base.studentid = Students.Student_ID where sectionid = '%s' );\"\n )\n cursor.execute(postgreSQL_select_Query, (lectureid, sectionid))\n print('Selecting rows from POOL table using cursor.fetchall')\n current_table = cursor.fetchall()\n print(\"Print each row and it's columns values\")\n longstring = str('')\n for row in 
current_table:\n longstring = ''.join((longstring, 'Student ID = ', str(row[0]), '\\n'))\n longstring = ''.join((longstring, 'Family Name = ', row[1], '\\n'))\n longstring = ''.join((longstring, 'Given Name = ', row[2], '\\n'))\n cursor.close()\n connection.close()\n print('PostgreSQL connection is closed')\n return longstring\n\n\nif __name__ == '__main__':\n a = '234567890'\n b = 'Tester'\n c = 'One'\n print(absent(101, 1001))\n", "step-5": "# # -*- coding: utf-8 -*-\n#\n# \"\"\"\n# Py40 PyQt5 tutorial\n#\n# This example shows three labels on a window\n# using absolute positioning.\n#\n# author: Jan Bodnar\n# website: py40.com\n# last edited: January 2015\n# \"\"\"\n#\n# import sys\n# from PyQt5.QtWidgets import QWidget, QLabel, QApplication\n#\n#\n# class Example(QWidget):\n#\n# def __init__(self):\n# super().__init__()\n#\n# self.initUI()\n#\n# def initUI(self):\n# lbl1 = QLabel('Zetcode', self)\n# lbl1.move(15, 10)\n#\n# lbl2 = QLabel('tutorials', self)\n# lbl2.move(35, 40)\n#\n# lbl3 = QLabel('for programmers', self)\n# lbl3.move(55, 70)\n#\n# self.setGeometry(300, 300, 250, 150)\n# self.setWindowTitle('Absolute')\n# self.show()\n#\n#\n# if __name__ == '__main__':\n# app = QApplication(sys.argv)\n# ex = Example()\n# sys.exit(app.exec_())\nimport psycopg2\n\ndef absent(lectureid,sectionid):\n connection = psycopg2.connect(database=\"profmate\", user=\"python\", password=\"python\", host=\"34.74.217.167\",\n port=\"5432\")\n cursor = connection.cursor()\n postgreSQL_select_Query =\"select * from lec_%s \\\n where student_id not in (select base.studentid\\\n from (select S.SectionID,Lectures.Lecture_Name,P.StudentID\\\n from Sections As S\\\n Join POOL as P\\\n On (P.Time > S.Time_Start)\\\n and (P.Time < S.Time_End)\\\n Join Lectures\\\n ON S.LectureID = Lectures.Lecture_ID\\\n Order By SectionID) as base\\\n join Students \\\n ON base.studentid = Students.Student_ID\\\n where sectionid = '%s' );\"\n\n cursor.execute(postgreSQL_select_Query,(lectureid,sectionid))\n print(\"Selecting rows from POOL table using cursor.fetchall\")\n current_table = cursor.fetchall()\n\n print(\"Print each row and it's columns values\")\n\n longstring = str('')\n for row in current_table:\n # print(\"Student ID = \", row[0])\n # print(\"Family Name = \", row[1])\n # print(\"Given Name = \", row[2], \"\\n\")\n longstring = \"\".join((longstring, \"Student ID = \",str(row[0]),\"\\n\"))\n longstring = \"\".join((longstring, \"Family Name = \", row[1], \"\\n\"))\n longstring = \"\".join((longstring, \"Given Name = \", row[2], \"\\n\"))\n\n cursor.close()\n connection.close()\n print(\"PostgreSQL connection is closed\")\n return longstring\n\nif __name__ == '__main__':\n a = '234567890'\n b = 'Tester'\n c = 'One'\n # insert_students(a, b, c)\n print(absent(101, 1001))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import glob
import csv
import math

import pandas

# this is used to train the model, try different model, generate the csv file of the result

import pandas
import pandas as pd
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
import csv
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifierCV
import attr
# from pycm import *
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import cohen_kappa_score
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import numpy as np


# evaluation_path = '/aul/homes/qli027/projects/RNN/evaluation/random/'
# # activity = ['work','go_back_home','baby_present','entertainment','smoke','alexa','others','print','check_body_condition']
# for i in range (0,9):
#     with open(evaluation_path + str(i) +'.csv', 'w') as new:
#         realnames = ['model','TP','FN','TN','FP']
#         writer = csv.DictWriter(new, fieldnames = realnames)
#         writer.writeheader()
#         new.close()


def naiveBayes(X_train, y_train):
    model = GaussianNB()
    model = model.fit(X_train, y_train)
    return (model)


def knn(X_train, y_train):
    model = KNeighborsClassifier()
    model = model.fit(X_train, y_train)
    return (model)


def decisionTree(X_train, y_train):
    model = tree.DecisionTreeClassifier(class_weight='balanced')
    model = model.fit(X_train, y_train)
    return (model)


def svm_linear(X_train, y_train):
    model = SVC(kernel='linear', class_weight='balanced')
    model = model.fit(X_train, y_train)
    return (model)


def svm_2(X_train, y_train):
    model = SVC(kernel='poly', class_weight='balanced', degree=2, random_state=0)
    model = model.fit(X_train, y_train)
    return (model)


def svm_3(X_train, y_train):
    model = SVC(kernel='poly', class_weight='balanced', degree=3, random_state=0)
    model = model.fit(X_train, y_train)
    return (model)


def svm_4(X_train, y_train):
    model = SVC(kernel='poly', class_weight='balanced', degree=4, random_state=0)
    model = model.fit(X_train, y_train)
    return (model)


def svm_5(X_train, y_train):
    model = SVC(kernel='poly', class_weight='balanced', degree=5, random_state=0)
    model = model.fit(X_train, y_train)
    return (model)


def svm_6(X_train, y_train):
    model = SVC(kernel='poly', class_weight='balanced', degree=6, random_state=0)
    model = model.fit(X_train, y_train)
    return (model)


def svm_7(X_train, y_train):
    model = SVC(kernel='poly', class_weight='balanced', degree=7, random_state=0)
    model = model.fit(X_train, y_train)
    return (model)


def svm_8(X_train, y_train):
    model = SVC(kernel='poly', class_weight='balanced', degree=8, random_state=0)
    model = model.fit(X_train, y_train)
    return (model)


def logisticRegression(X_train, y_train):
    model = LogisticRegression(class_weight='balanced')
    model = model.fit(X_train, y_train)
    return (model)


def passiveAggressiveClassifier(X_train, y_train):
    model = PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=1e-3, class_weight='balanced')
    model = model.fit(X_train, y_train)
    return (model)


def svm_rbf(X_train, y_train):
    model = SVC(kernel='rbf', class_weight='balanced')
    model = model.fit(X_train, y_train)
    return (model)


def random_forest(X_train, y_train):
    model = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0, class_weight='balanced')
    model = model.fit(X_train, y_train)
    return (model)


def ridgeClassifierCV(X_train, y_train):
    model = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1], class_weight='balanced')
    model = model.fit(X_train, y_train)
    return (model)


def evaluation_result(y_test, y_pred, model):
    cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    print(cnf_matrix)

    FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
    FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
    TP = np.diag(cnf_matrix)
    TN = cnf_matrix.sum() - (FP + FN + TP)

    FP = FP.astype(int)
    FN = FN.astype(int)
    TP = TP.astype(int)
    TN = TN.astype(int)
    print(TP, TN, FP, FN)

    evaluation_path = 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/'
    for i in range(0, 13):
        with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])
            csvfile.close()


#

data = pd.read_csv("C:/penv/unsw/csvfiles/labeled/count/useractivity/new.csv")
data = data.dropna()

feature_cols = ['Size', 'Amazon Echo', 'Belkin Motion',
                'Belkin Switch', 'Blipcare BloodPressure Meter', 'HP Printer', 'Dropcam', 'Insteon Camera',
                'LIFX Smart Bulb', 'NEST Smoke Alarm', 'Netatmo Welcome Camera', 'Netatmo Weather Station',
                'PIX-STAR Photo-frame', 'Samsung SmartCam', 'Smart Things', 'TP-Link Day Night Cloud camera',
                'TP-Link Smart plug', 'Triby Speaker', 'Withings Smart Baby Monitor', 'Withings Smart scale',
                'Withings Aura smart sleep sensor', 'iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',
                'Laptop', 'MacBook', 'Android Phone 2', 'iPhone', 'MacBook/iPhone']
# feature_cols = [ 'Amazon Echo', 'Belkin Motion',
#                 'Belkin Switch','Blipcare BloodPressure Meter','HP Printer','Dropcam','Insteon Camera',
#                 'LIFX Smart Bulb', 'NEST Smoke Alarm','Netatmo Welcome Camera', 'Netatmo Weather Station',
#                 'PIX-STAR Photo-frame','Samsung SmartCam','Smart Things', 'TP-Link Day Night Cloud camera',
#                 'TP-Link Smart plug','Triby Speaker','Withings Smart Baby Monitor','Withings Smart scale',
#                 'Withings Aura smart sleep sensor','iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',
#                 'Laptop', 'MacBook', 'Android Phone 2','iPhone','MacBookiPhone']
# feature_cols = ['Size']
X = data[feature_cols]
scaler = StandardScaler()
X = scaler.fit_transform(X)  # Features
y = data['User Activity']  # Target variable

# instantiate the model (using the default parameters)
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)

# d = [decisionTree, logisticRegression,knn, svm_linear, svm_2,svm_3,svm_rbf,ridgeClassifierCV,naiveBayes,cnn_3layers,random_forest]
model = decisionTree(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'decisionTree')

model = logisticRegression(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'logisticRegression')

model = knn(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'knn')

model = svm_linear(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_linear')

model = svm_2(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_2')

model = svm_3(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_3')

model = svm_rbf(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_rbf')

model = naiveBayes(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'naiveBayes')

model = random_forest(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'random_forest')

model = ridgeClassifierCV(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'ridgeClassifierCV')

model = passiveAggressiveClassifier(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'passiveAggressiveClassifier')
normal
{ "blob_id": "a92384a6abee9e231092ee0e4dbdb60bafcc9979", "index": 8782, "step-1": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return 
model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_7(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=7,\n random_state=0)\n model = 
model.fit(X_train, y_train)\n return model\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef passiveAggressiveClassifier(X_train, y_train):\n model = PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=\n 0.001, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_2(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=2,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_7(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=7,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n 
random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef passiveAggressiveClassifier(X_train, y_train):\n model = PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=\n 0.001, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n", "step-5": "import glob\nimport csv\nimport math\n\nimport pandas\n\n# this is used to train the model, try different model, generate the csv file of the result\n\nimport pandas\nimport pandas as pd\nimport pickle\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport csv\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifierCV\nimport attr\n# from pycm import *\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import matthews_corrcoef\nfrom sklearn.metrics import cohen_kappa_score\nfrom sklearn import tree\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\n\n\n# evaluation_path = '/aul/homes/qli027/projects/RNN/evaluation/random/'\n# # activity = ['work','go_back_home','baby_present','entertainment','smoke','alexa','others','print','check_body_condition']\n# for i in range (0,9):\n# with open(evaluation_path + str(i) +'.csv', 'w') as new:\n# realnames = ['model','TP','FN','TN','FP']\n# writer = csv.DictWriter(new, fieldnames = realnames)\n# writer.writeheader()\n# new.close()\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = 
model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_2(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=2, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_7(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=7, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef passiveAggressiveClassifier(X_train, y_train):\n model = PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=1e-3, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1], class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n\n evaluation_path = 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/'\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n#\n\n\ndata = pd.read_csv(\"C:/penv/unsw/csvfiles/labeled/count/useractivity/new.csv\")\ndata = data.dropna()\n\nfeature_cols = ['Size', 'Amazon Echo', 'Belkin Motion',\n 'Belkin Switch', 'Blipcare BloodPressure Meter', 'HP Printer', 'Dropcam', 'Insteon Camera',\n 'LIFX Smart Bulb', 'NEST Smoke Alarm', 'Netatmo Welcome Camera', 'Netatmo Weather Station',\n 'PIX-STAR Photo-frame', 'Samsung SmartCam', 'Smart Things', 'TP-Link Day Night Cloud camera',\n 'TP-Link Smart plug', 'Triby Speaker', 'Withings Smart Baby Monitor', 'Withings 
Smart scale',\n 'Withings Aura smart sleep sensor', 'iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',\n 'Laptop', 'MacBook', 'Android Phone 2', 'iPhone', 'MacBook/iPhone']\n# feature_cols = [ 'Amazon Echo', 'Belkin Motion',\n# 'Belkin Switch','Blipcare BloodPressure Meter','HP Printer','Dropcam','Insteon Camera',\n# 'LIFX Smart Bulb', 'NEST Smoke Alarm','Netatmo Welcome Camera', 'Netatmo Weather Station',\n# 'PIX-STAR Photo-frame','Samsung SmartCam','Smart Things', 'TP-Link Day Night Cloud camera',\n# 'TP-Link Smart plug','Triby Speaker','Withings Smart Baby Monitor','Withings Smart scale',\n# 'Withings Aura smart sleep sensor','iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',\n# 'Laptop', 'MacBook', 'Android Phone 2','iPhone','MacBookiPhone']\n# feature_cols = ['Size']\nX = data[feature_cols]\nscaler = StandardScaler()\nX = scaler.fit_transform(X) # Features\ny = data['User Activity'] # Target variable\n\n# instantiate the model (using the default parameters)\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n\n# d = [decisionTree, logisticRegression,knn, svm_linear, svm_2,svm_3,svm_rbf,ridgeClassifierCV,naiveBayes,cnn_3layers,random_forest]\nmodel = decisionTree(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'decisionTree')\n\nmodel = logisticRegression(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'logisticRegression')\n\nmodel = knn(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'knn')\n\nmodel = svm_linear(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_linear')\n\nmodel = svm_2(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_2')\n\nmodel = svm_3(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_3')\n\nmodel = svm_rbf(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_rbf')\n\nmodel = naiveBayes(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'naiveBayes')\n\nmodel = random_forest(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'random_forest')\n\nmodel = ridgeClassifierCV(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'ridgeClassifierCV')\n\nmodel = passiveAggressiveClassifier(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'passiveAggressiveClassifier')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 13, 14, 16, 17, 21 ] }
[ 13, 14, 16, 17, 21 ]
# coding: utf-8

import datetime
import json
import requests
import os
import re
import sys
from todoist.api import TodoistAPI

#SLACK_CHANNEL = os.environ['SLACK_CHANNEL']
#SLACK_POSTURL = os.environ['SLACK_POSTURL']
TDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)
TDIAPI.sync()
name = os.environ['TODOIST_PJT']

def lambda_handler(event, context):
    if event["function"] == 'tasklist':
        msg = tasklist(name)
    if event["function"] == 'activity':
        msg = activity(name)
    return

def activity(name):
    actlogs = TDIAPI.activity.get()
    pjts = TDIAPI.state['projects']

    for projects_id in pjts:
        if projects_id['name'] == name:
            tasks_project_id = projects_id['id']
            break
    else:
        print('[INFO] Not match project name')

    event_list = []
    for events in actlogs['events']:
        today = datetime.datetime.now().strftime("%Y-%m-%d")

        '''
        Todoist's event_date is a UTC string, so convert it to datetime and add 9 hours,
        then keep only the date portion and convert back to a string to check whether
        the entry belongs to today in Japan time.
        '''
        todoist_times = datetime.datetime.strptime(events['event_date'], '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours = 9)
        todoist_date = str(todoist_times.strftime("%Y-%m-%d"))

        if events['event_type'] == 'completed' and todoist_date == today and events['parent_project_id'] == tasks_project_id:
            event_list.append(events['extra_data']['content'])

    print(event_list)
    return event_list

def tasklist(name):
    pjts = TDIAPI.state['projects']
    items = TDIAPI.state['items']
    labels = TDIAPI.state['labels']
    sects = TDIAPI.state['sections']

    inbox_list = []
    doing_list = []
    review_list = []
    any_list = []

    for projects_id in pjts:
        if projects_id['name'] == name:
            tasks_project_id = projects_id['id']
            break

    try:
        tasks_project_id
    except NameError:
        print("The project name is not valid. Please enter a correct project name.")
        return

    print(labels)
    sys.exit()

    for item in items:
        l_content = item['content']
        l_pjt_name = [ pjt['name'] for pjt in pjts if item['project_id'] == pjt['id'] ]
        l_sec_name = [ sect['name'] for sect in sects if item['section_id'] == sect['id']]
        #print('+++')
        #print(l_pjt_id)
        #print(l_content)
        #print(l_sec_name[0])

        if l_sec_name is not None and l_sec_name[0] == 'ToDo':
            print(l_sec_name)
            #if item['checked'] == 0 and item['project_id'] == tasks_project_id:

        #taskcontent = '- ' + item['content']
        #slackmessage.append(taskcontent)
        #print(taskcontent)
        #print(slackmessage)
    #message = '\n'.join(slackmessage)
    return

def slack_notify():
    title = "*[Scheduled notification] Task list for project " + name + "*\n"
    slack_message = {
        'channel': SLACK_CHANNEL,
        'icon_emoji': ":todoist:",
        'text': title,
        "attachments": [
            {
                "color": "#36a64f",
                "fields": [
                    {
                        "value": msg,
                    },
                ],
            }
        ]
    }
    #requests.post(SLACK_POSTURL, data=json.dumps(slack_message))
normal
{ "blob_id": "3c3d45f0844496b8d623286b36a4935a154f410a", "index": 4133, "step-1": "<mask token>\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\n<mask token>\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n", "step-2": "<mask token>\nTDIAPI.sync()\n<mask token>\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n", "step-3": "<mask token>\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = 
os.environ['TODOIST_PJT']\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n", "step-4": "import datetime\nimport json\nimport requests\nimport os\nimport re\nimport sys\nfrom todoist.api import TodoistAPI\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\n\ndef lambda_handler(event, context):\n if event['function'] == 'tasklist':\n msg = tasklist(name)\n if event['function'] == 'activity':\n msg = activity(name)\n return\n\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime('%Y-%m-%d')\n \"\"\"\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n \"\"\"\n todoist_times = datetime.datetime.strptime(events['event_date'],\n '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours=9)\n todoist_date = str(todoist_times.strftime('%Y-%m-%d'))\n if events['event_type'\n ] == 'completed' and todoist_date == today and events[\n 'parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n print(event_list)\n return event_list\n\n\ndef tasklist(name):\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n inbox_list = 
[]\n doing_list = []\n review_list = []\n any_list = []\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n try:\n tasks_project_id\n except NameError:\n print('プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。')\n return\n print(labels)\n sys.exit()\n for item in items:\n l_content = item['content']\n l_pjt_name = [pjt['name'] for pjt in pjts if item['project_id'] ==\n pjt['id']]\n l_sec_name = [sect['name'] for sect in sects if item['section_id'] ==\n sect['id']]\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n return\n\n\ndef slack_notify():\n title = '*[定期通知] プロジェクト ' + name + ' のタスクリスト*\\n'\n slack_message = {'channel': SLACK_CHANNEL, 'icon_emoji': ':todoist:',\n 'text': title, 'attachments': [{'color': '#36a64f', 'fields': [{\n 'value': msg}]}]}\n", "step-5": "# coding: utf-8\n\nimport datetime\nimport json\nimport requests\nimport os\nimport re\nimport sys\nfrom todoist.api import TodoistAPI\n\n#SLACK_CHANNEL = os.environ['SLACK_CHANNEL']\n#SLACK_POSTURL = os.environ['SLACK_POSTURL']\nTDIAPI = TodoistAPI(os.environ['TODOISTAPITOKEN'], cache=False)\nTDIAPI.sync()\nname = os.environ['TODOIST_PJT']\n\ndef lambda_handler(event, context):\n if event[\"function\"] == 'tasklist':\n msg = tasklist(name)\n if event[\"function\"] == 'activity':\n msg = activity(name)\n return\n\ndef activity(name):\n actlogs = TDIAPI.activity.get()\n pjts = TDIAPI.state['projects']\n\n for projects_id in pjts:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n else:\n print('[INFO] Not match project name')\n\n event_list = []\n for events in actlogs['events']:\n today = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n '''\n todoistのevent_dateはUTCで且つstringなので一度datetime型に変換して、+9時間する\n そこから年月日だけにして、stringに戻して日本時間の今日のデータかをチェック\n '''\n todoist_times = datetime.datetime.strptime(events['event_date'], '%Y-%m-%dT%H:%M:%SZ') + datetime.timedelta(hours = 9)\n todoist_date = str(todoist_times.strftime(\"%Y-%m-%d\"))\n\n if events['event_type'] == 'completed' and todoist_date == today and events['parent_project_id'] == tasks_project_id:\n event_list.append(events['extra_data']['content'])\n\n print(event_list)\n return event_list\n\ndef tasklist(name):\n\n pjts = TDIAPI.state['projects']\n items = TDIAPI.state['items']\n labels = TDIAPI.state['labels']\n sects = TDIAPI.state['sections']\n\n inbox_list = []\n doing_list = []\n review_list = []\n any_list = []\n\n for projects_id in list:\n if projects_id['name'] == name:\n tasks_project_id = projects_id['id']\n break\n\n try:\n tasks_project_id\n except NameError:\n print(\"プロジェクト名が正しくありません。プロジェクト名を正しく入力してください。\")\n return\n\n print(labels)\n sys.exit()\n\n for item in items:\n l_content = item['content']\n l_pjt_name = [ pjt['name'] for pjt in pjts if item['project_id'] == pjt['id'] ]\n l_sec_name = [ sect['name'] for sect in sects if item['section_id'] == sect['id']]\n #print('+++')\n #print(l_pjt_id)\n #print(l_content)\n #print(l_sec_name[0])\n\n if l_sec_name is not None and l_sec_name[0] == 'ToDo':\n print(l_sec_name)\n #if item['checked'] == 0 and item['project_id'] == tasks_project_id:\n\n #taskcontent = '- ' + item['content']\n #slackmessage.append(taskcontent)\n #print(taskcontent)\n #print(slackmessage)\n #message = '\\n'.join(slackmessage)\n return\n\ndef slack_notify():\n title = \"*[定期通知] プロジェクト \" + name + \" のタスクリスト*\\n\"\n slack_message = {\n 'channel': SLACK_CHANNEL,\n 'icon_emoji': \":todoist:\",\n 'text': title,\n \"attachments\": [\n {\n 
\"color\": \"#36a64f\",\n \"fields\": [\n {\n \"value\": msg,\n },\n ],\n }\n ]\n }\n #requests.post(SLACK_POSTURL, data=json.dumps(slack_message))\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
import matplotlib.pyplot as plt
import numpy as np
import random

plt.ion()

def draw_board(grid_size, hole_pos, wall_pos):
    board = np.ones((grid_size, grid_size))
    board[wall_pos] = 10
    board[hole_pos] = 0
    return board

class Game():
    """
    A class which implements the Gobble game. Initializes with a grid_size
    and path_radius. There is an "example" method to illustrate how the
    game is played.
    """
    def __init__(self, grid_size):
        self.grid_size = grid_size
        #self.player_pos = (np.random.randint(grid_size),np.random.randint(grid_size))
        self.start_game(grid_size)
        #self.show_board()
        plt.title("Nate's Lame Game")

    def start_game(self, grid_size):
        self.score = 0
        self.goal_pos = (0,0)
        self.wall_pos = (grid_size//2, np.arange(5))
        self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)
        self.player_pos = (9,9)
        self.board[self.player_pos] = .5

        # self.board[self.player_pos] = .5

    def show_board(self):
        plt.imshow(self.board)

    def update_board(self, new_pos, show_plt=False):
        # if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) < np.sum(np.abs(np.array(self.player_pos) - np.array(self.goal_pos))):
        #     self.score += 1
        # else:
        #     self.score -= 1
        if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:
            self.score += 100

        self.board[self.player_pos] = 1
        self.board[new_pos] = .5
        self.player_pos = new_pos

        if show_plt:
            self.show_board()
        if self.check_end():
            print('Game over yo')
            self.start_game(self.grid_size)
            return True

        return False

    def get_actions(self):
        x, y = self.player_pos
        actions = [(x+1,y), (x,y+1),
                   (x-1,y), (x,y-1)]

        v_dim = self.board.shape[0]
        valid = []
        for a in actions:
            if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1] > -1 and self.board[a] != 10:
                valid.append(a)

        return valid

    def check_end(self):
        if self.player_pos == self.goal_pos:
            print('game is finished')
            self.score = 0
            return True
        else:
            return False

    def example(self):
        """
        Illustrates how to play the game.
        """
        while self.check_end() == False:
            plt.pause(0.25)
            end = self.update_board(random.choice(self.get_actions()), True)
normal
{ "blob_id": "a74f2050a057f579a8a8b77ac04ef09073cdb6cf", "index": 6057, "step-1": "<mask token>\n\n\nclass Game:\n <mask token>\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n", "step-2": "<mask token>\n\n\nclass Game:\n \"\"\"\n A class which implements the Gobble game. Initializes with a grid_size\n and path_radius. There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n", "step-3": "<mask token>\n\n\ndef draw_board(grid_size, hole_pos, wall_pos):\n board = np.ones((grid_size, grid_size))\n board[wall_pos] = 10\n board[hole_pos] = 0\n return board\n\n\nclass Game:\n \"\"\"\n A class which implements the Gobble game. 
Initializes with a grid_size\n and path_radius. There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n", "step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nplt.ion()\n\n\ndef draw_board(grid_size, hole_pos, wall_pos):\n board = np.ones((grid_size, grid_size))\n board[wall_pos] = 10\n board[hole_pos] = 0\n return board\n\n\nclass Game:\n \"\"\"\n A class which implements the Gobble game. Initializes with a grid_size\n and path_radius. 
There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n", "step-5": "import matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\nplt.ion()\n\ndef draw_board(grid_size, hole_pos,wall_pos):\n board = np.ones((grid_size,grid_size))\n board[wall_pos] = 10\n board[hole_pos] = 0\n return board\n\nclass Game():\n \"\"\"\n A class which implements the Gobble game. Initializes with a grid_size\n and path_radius. 
There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n def __init__(self, grid_size):\n self.grid_size = grid_size\n #self.player_pos = (np.random.randint(grid_size),np.random.randint(grid_size))\n self.start_game(grid_size)\n #self.show_board()\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = (0,0)\n self.wall_pos = (grid_size//2,np.arange(5))\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = (9,9)\n self.board[self.player_pos] = .5\n \n # self.board[self.player_pos] = .5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n # if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) < np.sum(np.abs(np.array(self.player_pos) - np.array(self.goal_pos))):\n # self.score += 1\n # else:\n # self.score -= 1\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n\n self.board[self.player_pos] = 1\n self.board[new_pos] = .5\n self.player_pos = new_pos\n\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n\n\n return False\n\n def get_actions(self):\n x,y = self.player_pos\n actions = [(x+1,y), (x,y+1),\n (x-1,y), (x,y-1)]\n\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1] > -1 and self.board[a] != 10:\n valid.append(a)\n\n return valid\n\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n\n", "step-ids": [ 8, 9, 10, 12, 13 ] }
[ 8, 9, 10, 12, 13 ]
"""Primer3 input form.

For details on input params see:
https://primer3.org/manual.html#globalTags
"""

from django import forms
from django.core.exceptions import ValidationError

from .fasta import Fasta


class PrimerForm(forms.Form):
    """Collect user input to run primer prediction."""

    fasta = forms.CharField(initial="")
    # Primer size range
    primer_min = forms.IntegerField(initial=18, max_value=35)
    primer_max = forms.IntegerField(initial=27, max_value=35)
    primer_optimum = forms.IntegerField(initial=20, max_value=35)
    # Amplicon size range
    amplicon_min = forms.IntegerField(
        initial=60, min_value=50, max_value=20000)
    amplicon_max = forms.IntegerField(
        initial=80, min_value=50, max_value=20000)
    # Primer melting temperature range
    tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)
    tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)
    tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)
    # Max self complement
    self_dimer_any = forms.FloatField(
        initial=8.0, min_value=0, max_value=9999.99)
    # Max self complement 3'
    self_dimer_end = forms.FloatField(
        initial=3.0, min_value=0, max_value=9999.99)
    # GC content
    gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)
    gc_clamp = forms.IntegerField(initial=0)

    def clean(self):
        """Validate and return user input."""
        data = self.cleaned_data
        data['fasta'] = Fasta.from_string(data['fasta'])
        validate_fasta(data)
        return data


def validate_fasta(data):
    """Validate input sequence lengths."""
    for sequence in data['fasta'].values():
        print(f'Sequence length {len(sequence)} nt')
        if len(sequence) < data['amplicon_min']:
            raise ValidationError({'fasta':
                f'Input sequence must be longer than minimum'
                + f' amplicon length parameter ({data["amplicon_min"]} nt)'
            })
normal
{ "blob_id": "6291375738db7914d551f9a1c6d2897b7d236b87", "index": 1742, "step-1": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n <mask token>\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = 
Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta': \n f'Input sequence must be longer than minimum' +\n f\" amplicon length parameter ({data['amplicon_min']} nt)\"})\n", "step-4": "<mask token>\nfrom django import forms\nfrom django.core.exceptions import ValidationError\nfrom .fasta import Fasta\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n fasta = forms.CharField(initial='')\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n amplicon_min = forms.IntegerField(initial=60, min_value=50, max_value=20000\n )\n amplicon_max = forms.IntegerField(initial=80, min_value=50, max_value=20000\n )\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n self_dimer_any = forms.FloatField(initial=8.0, min_value=0, max_value=\n 9999.99)\n self_dimer_end = forms.FloatField(initial=3.0, min_value=0, max_value=\n 9999.99)\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta': \n f'Input sequence must be longer than minimum' +\n f\" amplicon length parameter ({data['amplicon_min']} nt)\"})\n", "step-5": "\"\"\"Primer3 input form.\n\nFor details on input params see:\nhttps://primer3.org/manual.html#globalTags\n\"\"\"\n\nfrom django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom .fasta import Fasta\n\n\nclass PrimerForm(forms.Form):\n \"\"\"Collect user input to run primer prediction.\"\"\"\n\n fasta = forms.CharField(initial=\"\")\n # Primer size range\n primer_min = forms.IntegerField(initial=18, max_value=35)\n primer_max = forms.IntegerField(initial=27, max_value=35)\n primer_optimum = forms.IntegerField(initial=20, max_value=35)\n # Amplicon size range\n amplicon_min = forms.IntegerField(\n initial=60, min_value=50, max_value=20000)\n amplicon_max = forms.IntegerField(\n initial=80, min_value=50, max_value=20000)\n # Primer melting temperature range\n tm_min = forms.FloatField(initial=59, min_value=0, max_value=100)\n tm_max = forms.FloatField(initial=61, min_value=0, max_value=100)\n tm_optimum = forms.FloatField(initial=60, min_value=0, max_value=100)\n # Max self complement\n self_dimer_any = forms.FloatField(\n initial=8.0, min_value=0, max_value=9999.99)\n # Max self complement 3'\n self_dimer_end = forms.FloatField(\n initial=3.0, min_value=0, max_value=9999.99)\n # GC content\n gc_min = forms.FloatField(initial=20.0, min_value=0, max_value=100)\n gc_clamp = forms.IntegerField(initial=0)\n\n def clean(self):\n \"\"\"Validate and return user input.\"\"\"\n data = self.cleaned_data\n data['fasta'] = 
Fasta.from_string(data['fasta'])\n validate_fasta(data)\n return data\n\n\ndef validate_fasta(data):\n \"\"\"Validate input sequence lengths.\"\"\"\n for sequence in data['fasta'].values():\n print(f'Sequence length {len(sequence)} nt')\n if len(sequence) < data['amplicon_min']:\n raise ValidationError({'fasta':\n f'Input sequence must be longer than minimum'\n + f' amplicon length parameter ({data[\"amplicon_min\"]} nt)'\n })\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
#!/usr/bin/env python """\ Simple g-code streaming script for grbl """ import serial import time import csv import json import RPi.GPIO as GPIO from multiprocessing import Process, Queue class motion(): def __init__(self): # Open grbl serial port #self.s = serial.Serial("/dev/ttyUSB0",baudrate=115200,xonxoff=True,timeout=1) self.s = serial.Serial("/dev/ttyUSB0", baudrate=115200, timeout=0.1, rtscts=True, xonxoff=False) self.rsp='' self.posx=0.0 self.posy=0.0 self.positions_file = '/home/pi/Work/Wall2.0/system/positions.csv' self.home_position_file = '/home/pi/Work/Wall2.0/system/home.csv' self.mode = 'delay' self.sensor_pin = 3 self.interval = 1 GPIO.setmode(GPIO.BOARD) # GPIO.setup(self.sensor_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP) GPIO.setup(self.sensor_pin, GPIO.IN) # Wake up grbl self.s.write("\r\n\r\n") time.sleep(2) # Wait for grbl to initialize self.s.flushInput() # Flush startup text in serial input self.feedrate = 100 self.update_feedrate(0) with open(self.positions_file,'w') as f: f.write('posx,posy\n') self.homex=None self.homey=None with open(self.home_position_file,'r') as f: lines = csv.DictReader(f) for l in lines: print 'x_home: '+l['homex'] print 'y_home: '+l['homey'] self.homex = float(l['homex']) self.homey = float(l['homey']) # set origin offset #self.send("g92 x0 y0") self.set_relative_position() self.pos_queue = Queue() self.serial_proc = Process(target=self.get_response, args=(self.pos_queue,)) self.serial_proc.start() def update_feedrate(self, feedrate): tmp = self.feedrate + feedrate if(tmp >= 100) and (tmp <= 800): self.feedrate = tmp # feedrate speed self.send("f"+str(self.feedrate)) def update_interval(self, interval): if(self.interval >= 1) and (self.interval <= 10): self.interval += interval def send(self, cmd): print 'Sending: ' + cmd self.s.write(cmd + '\n') # Send g-code block to grbl def move(self,sign_x, sign_y): x = "x"+str(sign_x*10) y = "y"+str(sign_y*10) #self.send("%") self.send(" ".join(["g1",x,y])) def move_to_position(self,x,y): x = "x"+str(x) y = "y"+str(y) self.send(" ".join(["g1",x,y])) def stop(self): self.send("!") self.send("%") if (self.homex!=None) and (self.homey!=None): time.sleep(0.5) self.set_absolute_position() self.update_current_position() self.move_to_position(self.homex,self.homey) self.set_relative_position() def disconnect(self): # Close file and serial port self.s.close() def get_response(self, q): while(1): tmp = self.s.readline() tmp = tmp.strip() if tmp is not '': try: tmp = json.loads(tmp) print tmp if 'r' in tmp.keys(): if 'sr' in tmp['r'].keys(): tmp = tmp['r'] if 'sr' in tmp.keys(): if 'posx' in tmp['sr'].keys(): self.posx=tmp['sr']['posx'] if 'posy' in tmp['sr'].keys(): self.posy=tmp['sr']['posy'] q.put((self.posx, self.posy)) print 'pos1: '+str((self.posx, self.posy)) except ValueError: print "get_response chocked" self.stop() time.sleep(1) else: time.sleep(.2) def record_current_position(self): self.send('{"sr":null}') print "Saving" # TODO: Check if serial_proc is running? self.update_current_position() with open(self.positions_file,'a') as f: f.write(str(self.posx)+','+str(self.posy)+'\n') def record_home_position(self): self.send('{"sr":null}') print "Saving home" # TODO: Check if serial_proc is running? 
self.update_current_position() self.homex = self.posx self.homey = self.posy with open(self.home_position_file,'w') as f: f.write('homex,homey\n') f.write(str(self.posx)+','+str(self.posy)+'\n') def delete_home_position(self): print "Deleting home" with open(self.home_position_file,'w') as f: f.write('homex,homey\n') self.homex = None self.homey = None def update_current_position(self): while not self.pos_queue.empty(): self.posx, self.posy = self.pos_queue.get() def getTrigger(self): return GPIO.input(self.sensor_pin) def changeMode(self): if self.mode == 'delay': self.mode = 'sensor' elif self.mode == 'sensor': self.mode = 'delay' def set_absolute_position(self): # absolute mode self.send("g90") def set_relative_position(self): # relative mode self.send("g91") def playback_saved_positions(self): self.set_absolute_position() self.update_current_position() with open(self.positions_file) as f: lines = csv.DictReader(f) for l in lines: print 'x_dst: '+l['posx']+' - '+str(self.posx) print 'y_dst: '+l['posy']+' - '+str(self.posy) x_dst = float(l['posx'])#-self.posx y_dst = float(l['posy'])#-self.posy x = ' x'+str((x_dst)) y = ' y'+str((y_dst)) print(x,y) self.send('g1'+x+y) while(1): self.update_current_position() if (self.posx != float(l['posx'])) or \ (self.posy != float(l['posy'])): time.sleep(.1) else: break if(self.mode == 'delay'): time.sleep(self.interval) elif(self.mode == 'sensor'): num_strikes = 0 while num_strikes < self.interval: while(not self.getTrigger()): time.sleep(.01) num_strikes += 1 # relative mode self.send("g91")
normal
{ "blob_id": "ac2d4372f8913ea9ae1066833cca09985e521f99", "index": 383, "step-1": "#!/usr/bin/env python\n\"\"\"\\\nSimple g-code streaming script for grbl\n\"\"\"\n \nimport serial\nimport time\nimport csv\nimport json\nimport RPi.GPIO as GPIO\nfrom multiprocessing import Process, Queue\nclass motion():\n def __init__(self):\n # Open grbl serial port\n #self.s = serial.Serial(\"/dev/ttyUSB0\",baudrate=115200,xonxoff=True,timeout=1)\n self.s = serial.Serial(\"/dev/ttyUSB0\",\n baudrate=115200,\n timeout=0.1,\n rtscts=True,\n xonxoff=False)\n self.rsp=''\n self.posx=0.0\n self.posy=0.0\n self.positions_file = '/home/pi/Work/Wall2.0/system/positions.csv'\n self.home_position_file = '/home/pi/Work/Wall2.0/system/home.csv'\n self.mode = 'delay'\n self.sensor_pin = 3\n self.interval = 1\n GPIO.setmode(GPIO.BOARD)\n# GPIO.setup(self.sensor_pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n GPIO.setup(self.sensor_pin, GPIO.IN)\n\n # Wake up grbl\n self.s.write(\"\\r\\n\\r\\n\")\n time.sleep(2) # Wait for grbl to initialize\n self.s.flushInput() # Flush startup text in serial input \n\n self.feedrate = 100\n self.update_feedrate(0)\n\n with open(self.positions_file,'w') as f:\n f.write('posx,posy\\n')\n\n self.homex=None\n self.homey=None\n with open(self.home_position_file,'r') as f:\n lines = csv.DictReader(f)\n for l in lines:\n print 'x_home: '+l['homex']\n print 'y_home: '+l['homey']\n self.homex = float(l['homex'])\n self.homey = float(l['homey'])\n\n # set origin offset\n #self.send(\"g92 x0 y0\")\n\n self.set_relative_position()\n\n self.pos_queue = Queue()\n self.serial_proc = Process(target=self.get_response,\n args=(self.pos_queue,))\n\n self.serial_proc.start()\n\n def update_feedrate(self, feedrate):\n tmp = self.feedrate + feedrate\n if(tmp >= 100) and (tmp <= 800):\n self.feedrate = tmp\n # feedrate speed\n self.send(\"f\"+str(self.feedrate))\n\n def update_interval(self, interval):\n if(self.interval >= 1) and (self.interval <= 10):\n self.interval += interval\n \n def send(self, cmd): \n print 'Sending: ' + cmd\n self.s.write(cmd + '\\n') # Send g-code block to grbl\n\n def move(self,sign_x, sign_y):\n x = \"x\"+str(sign_x*10) \n y = \"y\"+str(sign_y*10) \n #self.send(\"%\")\n self.send(\" \".join([\"g1\",x,y]))\n\n def move_to_position(self,x,y):\n x = \"x\"+str(x) \n y = \"y\"+str(y) \n self.send(\" \".join([\"g1\",x,y]))\n\n def stop(self):\n self.send(\"!\")\n self.send(\"%\")\n if (self.homex!=None) and (self.homey!=None):\n time.sleep(0.5)\n self.set_absolute_position()\n self.update_current_position()\n self.move_to_position(self.homex,self.homey)\n self.set_relative_position()\n\n def disconnect(self):\n # Close file and serial port\n self.s.close()\n\n def get_response(self, q):\n while(1):\n tmp = self.s.readline()\n tmp = tmp.strip()\n if tmp is not '':\n try:\n tmp = json.loads(tmp)\n print tmp\n if 'r' in tmp.keys():\n if 'sr' in tmp['r'].keys():\n tmp = tmp['r']\n if 'sr' in tmp.keys():\n if 'posx' in tmp['sr'].keys():\n self.posx=tmp['sr']['posx']\n if 'posy' in tmp['sr'].keys():\n self.posy=tmp['sr']['posy']\n q.put((self.posx, self.posy))\n print 'pos1: '+str((self.posx, self.posy))\n except ValueError:\n print \"get_response chocked\"\n self.stop()\n time.sleep(1)\n else:\n time.sleep(.2)\n\n def record_current_position(self):\n self.send('{\"sr\":null}')\n print \"Saving\"\n # TODO: Check if serial_proc is running?\n self.update_current_position()\n with open(self.positions_file,'a') as f:\n f.write(str(self.posx)+','+str(self.posy)+'\\n')\n\n def 
record_home_position(self):\n self.send('{\"sr\":null}')\n print \"Saving home\"\n # TODO: Check if serial_proc is running?\n self.update_current_position()\n self.homex = self.posx\n self.homey = self.posy\n with open(self.home_position_file,'w') as f:\n f.write('homex,homey\\n')\n f.write(str(self.posx)+','+str(self.posy)+'\\n')\n\n def delete_home_position(self):\n print \"Deleting home\"\n with open(self.home_position_file,'w') as f:\n f.write('homex,homey\\n')\n self.homex = None\n self.homey = None\n\n def update_current_position(self):\n while not self.pos_queue.empty():\n self.posx, self.posy = self.pos_queue.get()\n\n def getTrigger(self):\n return GPIO.input(self.sensor_pin)\n\n def changeMode(self):\n if self.mode == 'delay':\n self.mode = 'sensor'\n elif self.mode == 'sensor':\n self.mode = 'delay'\n\n def set_absolute_position(self):\n # absolute mode \n self.send(\"g90\")\n\n def set_relative_position(self):\n # relative mode \n self.send(\"g91\")\n\n def playback_saved_positions(self):\n self.set_absolute_position()\n self.update_current_position()\n with open(self.positions_file) as f:\n lines = csv.DictReader(f)\n for l in lines:\n print 'x_dst: '+l['posx']+' - '+str(self.posx)\n print 'y_dst: '+l['posy']+' - '+str(self.posy)\n x_dst = float(l['posx'])#-self.posx\n y_dst = float(l['posy'])#-self.posy\n x = ' x'+str((x_dst))\n y = ' y'+str((y_dst))\n print(x,y)\n self.send('g1'+x+y)\n while(1):\n self.update_current_position()\n if (self.posx != float(l['posx'])) or \\\n (self.posy != float(l['posy'])):\n time.sleep(.1)\n else:\n break\n\n if(self.mode == 'delay'):\n time.sleep(self.interval)\n elif(self.mode == 'sensor'):\n num_strikes = 0\n while num_strikes < self.interval:\n while(not self.getTrigger()):\n time.sleep(.01)\n num_strikes += 1\n # relative mode \n self.send(\"g91\")\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import math
from Config import defaults as df
from Utils.controls import sigmoid_decay


def f1(phi, phi_o, d):
    """sinusoidally growing function between (phi_o - d) and phi_o"""
    return 1 - sigmoid_decay(phi, phi_o, d)


def f2(phi, sigma):
    """Gaussian bell curve (unnormalized normal distribution)"""
    return math.exp(-phi ** 2 / sigma ** 2)


def f3(phi, a):
    """sharp peak"""
    return a ** 2 / (phi + a) ** 2


def optofitness(op_array, n_obj=1):
    """apply the respective transfer functions to an array of order parameters
    **order of elements matters
    """
    d = 5
    f_speed = f1(op_array[1], df.v_flock, df.v_tol)
    f_coll = f3(op_array[3], df.a_tol)
    f_disc = f3(op_array[4], df.num_agents / 5)
    f_wall = f2(op_array[0], df.r_tol)
    f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)
    if op_array[2] > 0:
        f_corr = op_array[2]
    else:
        f_corr = 0
    time_fit = 1  # (1-sigmoid_decay(op_array[6], df.max_sim_time-df.wait_time, 200))
    if n_obj == 2:
        # F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster
        # F2 = -time_fit * f_wall * f_coll
        F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster
        F1 = -time_fit * f_wall * f_speed
        return round(F1, d), round(F2, d)

    elif n_obj == 3:
        F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster
        F2 = -time_fit * f_wall
        F3 = -time_fit * f_coll
        return round(F1, d), round(F2, d), round(F3, d)
    elif n_obj == 'all':
        return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(f_coll, d), round(f_disc, d), round(f_cluster, d)
    F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster
    return round(F1, d)
normal
{ "blob_id": "19bb3cd0c7862f39a78479d9a9703ebef198fc73", "index": 3677, "step-1": "<mask token>\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\n<mask token>\n\n\ndef optofitness(op_array, n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 1\n if n_obj == 2:\n F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(\n f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-3": "<mask token>\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\ndef f3(phi, a):\n \"\"\"sharp peak\"\"\"\n return a ** 2 / (phi + a) ** 2\n\n\ndef optofitness(op_array, n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 1\n if n_obj == 2:\n F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(\n f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-4": "import math\nfrom Config import defaults as df\nfrom Utils.controls import sigmoid_decay\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\ndef f3(phi, a):\n \"\"\"sharp peak\"\"\"\n return a ** 2 / (phi + a) ** 2\n\n\ndef optofitness(op_array, 
n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 1\n if n_obj == 2:\n F2 = -time_fit * f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(\n f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-5": "import math\nfrom Config import defaults as df\nfrom Utils.controls import sigmoid_decay\n\n\ndef f1(phi, phi_o, d):\n \"\"\"sinusoidally growing function between (phi_o-d) to phi_o\"\"\"\n return 1 - sigmoid_decay(phi, phi_o, d)\n\n\ndef f2(phi, sigma):\n \"\"\"normal distribution\"\"\"\n return math.exp(-phi ** 2 / sigma ** 2)\n\n\ndef f3(phi, a):\n \"\"\"sharp peak\"\"\"\n return a ** 2 / (phi + a) ** 2\n\n\ndef optofitness(op_array, n_obj=1):\n \"\"\"apply respective transfer functions to an array of order parameters\n **order of elements matters\n \"\"\"\n d = 5\n f_speed = f1(op_array[1], df.v_flock, df.v_tol)\n f_coll = f3(op_array[3], df.a_tol)\n f_disc = f3(op_array[4], df.num_agents / 5)\n f_wall = f2(op_array[0], df.r_tol)\n f_cluster = f1(op_array[5], df.num_agents / 5, df.num_agents / 5)\n if op_array[2] > 0:\n f_corr = op_array[2]\n else:\n f_corr = 0\n time_fit = 1 # (1-sigmoid_decay(op_array[6], df.max_sim_time-df.wait_time, 200))\n if n_obj == 2:\n # F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n # F2 = -time_fit * f_wall * f_coll\n F2 = -time_fit *f_coll * f_corr * f_disc * f_cluster\n F1 = -time_fit * f_wall * f_speed\n return round(F1, d), round(F2, d)\n\n elif n_obj == 3:\n F1 = -time_fit * f_speed * f_corr * f_disc * f_cluster\n F2 = -time_fit * f_wall\n F3 = -time_fit * f_coll\n return round(F1, d), round(F2, d), round(F3, d)\n elif n_obj == 'all':\n return round(f_wall, d), round(f_speed, d), round(f_corr, d), round(f_coll, d), round(f_disc, d), round(f_cluster, d)\n F1 = -time_fit * f_speed * f_coll * f_disc * f_wall * f_corr * f_cluster\n return round(F1, d)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# -*- coding: utf-8 -*- """ Created on Wed Dec 20 09:54:08 2017 @author: chuang """ import os import pickle #from collections import Counter #import user_replace import jieba import re from multiprocessing import Pool #%% # parameters for processing the dataset DATA_PATH = '../data/weibo_single/raw' USER_DICT = './userdict.txt' PROCESSED_PATH = '../data/weibo_single/processed' ENCODING = 'utf-8' jieba.load_userdict(USER_DICT) DELETE = ['\[.*?\]','\u200b'] MULTI = True #%% def replace_tokens(text,replace_dict=None): # for k,v in replace_dict.items(): # pattern = re.compile("|".join(v)) # text = pattern.sub(k,text) pattern = re.compile("|".join(DELETE)) text = re.sub(pattern,'',text) return text def read_txt(file_path,encoding): with open(os.path.join(DATA_PATH,file_path), 'r',encoding=encoding,errors='replace') as f: text = f.read() text = replace_tokens(text) #,user_replace.replace_dict convs = text.split('\n\n') lines = [c.split('\n') for c in convs] lines = [[i.strip() for i in c if i != ''] for c in lines] ## get ride of empties sentences lines = [c for c in lines if len(c)>1] return lines def context_answers(convos): context,answers = [],[] for convo in convos: for index,line in enumerate(convo[:-1]): context.append(line) answers.append(convo[index+1]) assert len(context) == len(answers) return context,answers def _basic_tokenizer(line,normalize_digits=False): """ A basic tokenizer to tokenize text into tokens """ _DIGIT_RE = re.compile(r"\d+") ## find digits words = [] tokens = list(jieba.cut(line.strip().lower())) if normalize_digits: for token in tokens: m = _DIGIT_RE.search(token) if m is None: words.append(token) else: words.append('_数字_') else: words = tokens return words def _tokenized_data(context,answers): train_enc_tokens = [_basic_tokenizer(t) for t in context] print('Train_enc_token done.') train_dec_tokens = [_basic_tokenizer(t) for t in answers] print('Train_dec_token done.') return train_enc_tokens, train_dec_tokens def _filter(ask_sent,answer_sent): if len(ask_sent)<3 or len(answer_sent)<2: return False if "@" in ask_sent or "@" in answer_sent: return False return True def filter_data(context,answers): ''' filter some answer that is too short or has @ in it ''' context_return, answers_return = [],[] for i in range(len(context)): c = context[i] a = answers[i] c_sent = " ".join(c) a_sent = " ".join(a) if _filter(c_sent,a_sent): context_return.append(c) answers_return.append(a) return context_return,answers_return def save_tokenized_data(train_enc_tokens,train_dec_tokens,save_file_name): save_file_path = os.path.join(PROCESSED_PATH,save_file_name) pickle.dump((train_enc_tokens, train_dec_tokens,[],[]),open(save_file_path,'wb')) print('Data saved') #%% if __name__ == "__main__": data_files = os.listdir(DATA_PATH) ## just do two files for now, too many data #%% asks,ans = [],[] for idx,file_path in enumerate(data_files): #file_path = 'multi_1_4.data' convos = read_txt(file_path,ENCODING) context,answers = context_answers(convos) asks.extend(context) ans.extend(answers) print('finish {}'.format(file_path)) print('Total length {}'.format(len(asks))) #%% if MULTI: print('tokanizing, multi process') cores = os.cpu_count()-2 p = Pool(cores) context = p.map(_basic_tokenizer,asks) print('Finish tokenizing ask sentences') answers = p.map(_basic_tokenizer,ans) print('Finish tokenizing answer sentences') p.close() p.join() else: context,answers = _tokenized_data(asks,ans) print("Total lentgh after tokenization: {}".format(len(context))) #%% context,answers = 
filter_data(context,answers) print("Total lentgh after filtering: {}".format(len(context))) #%% ## save into pickles save_tokenized_data(context,answers,'processed_tokens.p') #%% #print(context[:50]) #print(answers[:50])
normal
{ "blob_id": "5fd54de3b2f9c2e18a283d016fc16e0e622dc6a0", "index": 8415, "step-1": "<mask token>\n\n\ndef replace_tokens(text, replace_dict=None):\n pattern = re.compile('|'.join(DELETE))\n text = re.sub(pattern, '', text)\n return text\n\n\ndef read_txt(file_path, encoding):\n with open(os.path.join(DATA_PATH, file_path), 'r', encoding=encoding,\n errors='replace') as f:\n text = f.read()\n text = replace_tokens(text)\n convs = text.split('\\n\\n')\n lines = [c.split('\\n') for c in convs]\n lines = [[i.strip() for i in c if i != ''] for c in lines]\n lines = [c for c in lines if len(c) > 1]\n return lines\n\n\n<mask token>\n\n\ndef _basic_tokenizer(line, normalize_digits=False):\n \"\"\"\n A basic tokenizer to tokenize text into tokens\n \"\"\"\n _DIGIT_RE = re.compile('\\\\d+')\n words = []\n tokens = list(jieba.cut(line.strip().lower()))\n if normalize_digits:\n for token in tokens:\n m = _DIGIT_RE.search(token)\n if m is None:\n words.append(token)\n else:\n words.append('_数字_')\n else:\n words = tokens\n return words\n\n\ndef _tokenized_data(context, answers):\n train_enc_tokens = [_basic_tokenizer(t) for t in context]\n print('Train_enc_token done.')\n train_dec_tokens = [_basic_tokenizer(t) for t in answers]\n print('Train_dec_token done.')\n return train_enc_tokens, train_dec_tokens\n\n\ndef _filter(ask_sent, answer_sent):\n if len(ask_sent) < 3 or len(answer_sent) < 2:\n return False\n if '@' in ask_sent or '@' in answer_sent:\n return False\n return True\n\n\n<mask token>\n\n\ndef save_tokenized_data(train_enc_tokens, train_dec_tokens, save_file_name):\n save_file_path = os.path.join(PROCESSED_PATH, save_file_name)\n pickle.dump((train_enc_tokens, train_dec_tokens, [], []), open(\n save_file_path, 'wb'))\n print('Data saved')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef replace_tokens(text, replace_dict=None):\n pattern = re.compile('|'.join(DELETE))\n text = re.sub(pattern, '', text)\n return text\n\n\ndef read_txt(file_path, encoding):\n with open(os.path.join(DATA_PATH, file_path), 'r', encoding=encoding,\n errors='replace') as f:\n text = f.read()\n text = replace_tokens(text)\n convs = text.split('\\n\\n')\n lines = [c.split('\\n') for c in convs]\n lines = [[i.strip() for i in c if i != ''] for c in lines]\n lines = [c for c in lines if len(c) > 1]\n return lines\n\n\n<mask token>\n\n\ndef _basic_tokenizer(line, normalize_digits=False):\n \"\"\"\n A basic tokenizer to tokenize text into tokens\n \"\"\"\n _DIGIT_RE = re.compile('\\\\d+')\n words = []\n tokens = list(jieba.cut(line.strip().lower()))\n if normalize_digits:\n for token in tokens:\n m = _DIGIT_RE.search(token)\n if m is None:\n words.append(token)\n else:\n words.append('_数字_')\n else:\n words = tokens\n return words\n\n\ndef _tokenized_data(context, answers):\n train_enc_tokens = [_basic_tokenizer(t) for t in context]\n print('Train_enc_token done.')\n train_dec_tokens = [_basic_tokenizer(t) for t in answers]\n print('Train_dec_token done.')\n return train_enc_tokens, train_dec_tokens\n\n\ndef _filter(ask_sent, answer_sent):\n if len(ask_sent) < 3 or len(answer_sent) < 2:\n return False\n if '@' in ask_sent or '@' in answer_sent:\n return False\n return True\n\n\ndef filter_data(context, answers):\n \"\"\"\n filter some answer that is too short or has @ in it \n \"\"\"\n context_return, answers_return = [], []\n for i in range(len(context)):\n c = context[i]\n a = answers[i]\n c_sent = ' '.join(c)\n a_sent = ' '.join(a)\n if _filter(c_sent, a_sent):\n context_return.append(c)\n 
answers_return.append(a)\n return context_return, answers_return\n\n\ndef save_tokenized_data(train_enc_tokens, train_dec_tokens, save_file_name):\n save_file_path = os.path.join(PROCESSED_PATH, save_file_name)\n pickle.dump((train_enc_tokens, train_dec_tokens, [], []), open(\n save_file_path, 'wb'))\n print('Data saved')\n\n\n<mask token>\n", "step-3": "<mask token>\nDATA_PATH = '../data/weibo_single/raw'\nUSER_DICT = './userdict.txt'\nPROCESSED_PATH = '../data/weibo_single/processed'\nENCODING = 'utf-8'\njieba.load_userdict(USER_DICT)\nDELETE = ['\\\\[.*?\\\\]', '\\u200b']\nMULTI = True\n\n\ndef replace_tokens(text, replace_dict=None):\n pattern = re.compile('|'.join(DELETE))\n text = re.sub(pattern, '', text)\n return text\n\n\ndef read_txt(file_path, encoding):\n with open(os.path.join(DATA_PATH, file_path), 'r', encoding=encoding,\n errors='replace') as f:\n text = f.read()\n text = replace_tokens(text)\n convs = text.split('\\n\\n')\n lines = [c.split('\\n') for c in convs]\n lines = [[i.strip() for i in c if i != ''] for c in lines]\n lines = [c for c in lines if len(c) > 1]\n return lines\n\n\ndef context_answers(convos):\n context, answers = [], []\n for convo in convos:\n for index, line in enumerate(convo[:-1]):\n context.append(line)\n answers.append(convo[index + 1])\n assert len(context) == len(answers)\n return context, answers\n\n\ndef _basic_tokenizer(line, normalize_digits=False):\n \"\"\"\n A basic tokenizer to tokenize text into tokens\n \"\"\"\n _DIGIT_RE = re.compile('\\\\d+')\n words = []\n tokens = list(jieba.cut(line.strip().lower()))\n if normalize_digits:\n for token in tokens:\n m = _DIGIT_RE.search(token)\n if m is None:\n words.append(token)\n else:\n words.append('_数字_')\n else:\n words = tokens\n return words\n\n\ndef _tokenized_data(context, answers):\n train_enc_tokens = [_basic_tokenizer(t) for t in context]\n print('Train_enc_token done.')\n train_dec_tokens = [_basic_tokenizer(t) for t in answers]\n print('Train_dec_token done.')\n return train_enc_tokens, train_dec_tokens\n\n\ndef _filter(ask_sent, answer_sent):\n if len(ask_sent) < 3 or len(answer_sent) < 2:\n return False\n if '@' in ask_sent or '@' in answer_sent:\n return False\n return True\n\n\ndef filter_data(context, answers):\n \"\"\"\n filter some answer that is too short or has @ in it \n \"\"\"\n context_return, answers_return = [], []\n for i in range(len(context)):\n c = context[i]\n a = answers[i]\n c_sent = ' '.join(c)\n a_sent = ' '.join(a)\n if _filter(c_sent, a_sent):\n context_return.append(c)\n answers_return.append(a)\n return context_return, answers_return\n\n\ndef save_tokenized_data(train_enc_tokens, train_dec_tokens, save_file_name):\n save_file_path = os.path.join(PROCESSED_PATH, save_file_name)\n pickle.dump((train_enc_tokens, train_dec_tokens, [], []), open(\n save_file_path, 'wb'))\n print('Data saved')\n\n\nif __name__ == '__main__':\n data_files = os.listdir(DATA_PATH)\n asks, ans = [], []\n for idx, file_path in enumerate(data_files):\n convos = read_txt(file_path, ENCODING)\n context, answers = context_answers(convos)\n asks.extend(context)\n ans.extend(answers)\n print('finish {}'.format(file_path))\n print('Total length {}'.format(len(asks)))\n if MULTI:\n print('tokanizing, multi process')\n cores = os.cpu_count() - 2\n p = Pool(cores)\n context = p.map(_basic_tokenizer, asks)\n print('Finish tokenizing ask sentences')\n answers = p.map(_basic_tokenizer, ans)\n print('Finish tokenizing answer sentences')\n p.close()\n p.join()\n else:\n context, answers = 
_tokenized_data(asks, ans)\n print('Total lentgh after tokenization: {}'.format(len(context)))\n context, answers = filter_data(context, answers)\n print('Total lentgh after filtering: {}'.format(len(context)))\n save_tokenized_data(context, answers, 'processed_tokens.p')\n", "step-4": "<mask token>\nimport os\nimport pickle\nimport jieba\nimport re\nfrom multiprocessing import Pool\nDATA_PATH = '../data/weibo_single/raw'\nUSER_DICT = './userdict.txt'\nPROCESSED_PATH = '../data/weibo_single/processed'\nENCODING = 'utf-8'\njieba.load_userdict(USER_DICT)\nDELETE = ['\\\\[.*?\\\\]', '\\u200b']\nMULTI = True\n\n\ndef replace_tokens(text, replace_dict=None):\n pattern = re.compile('|'.join(DELETE))\n text = re.sub(pattern, '', text)\n return text\n\n\ndef read_txt(file_path, encoding):\n with open(os.path.join(DATA_PATH, file_path), 'r', encoding=encoding,\n errors='replace') as f:\n text = f.read()\n text = replace_tokens(text)\n convs = text.split('\\n\\n')\n lines = [c.split('\\n') for c in convs]\n lines = [[i.strip() for i in c if i != ''] for c in lines]\n lines = [c for c in lines if len(c) > 1]\n return lines\n\n\ndef context_answers(convos):\n context, answers = [], []\n for convo in convos:\n for index, line in enumerate(convo[:-1]):\n context.append(line)\n answers.append(convo[index + 1])\n assert len(context) == len(answers)\n return context, answers\n\n\ndef _basic_tokenizer(line, normalize_digits=False):\n \"\"\"\n A basic tokenizer to tokenize text into tokens\n \"\"\"\n _DIGIT_RE = re.compile('\\\\d+')\n words = []\n tokens = list(jieba.cut(line.strip().lower()))\n if normalize_digits:\n for token in tokens:\n m = _DIGIT_RE.search(token)\n if m is None:\n words.append(token)\n else:\n words.append('_数字_')\n else:\n words = tokens\n return words\n\n\ndef _tokenized_data(context, answers):\n train_enc_tokens = [_basic_tokenizer(t) for t in context]\n print('Train_enc_token done.')\n train_dec_tokens = [_basic_tokenizer(t) for t in answers]\n print('Train_dec_token done.')\n return train_enc_tokens, train_dec_tokens\n\n\ndef _filter(ask_sent, answer_sent):\n if len(ask_sent) < 3 or len(answer_sent) < 2:\n return False\n if '@' in ask_sent or '@' in answer_sent:\n return False\n return True\n\n\ndef filter_data(context, answers):\n \"\"\"\n filter some answer that is too short or has @ in it \n \"\"\"\n context_return, answers_return = [], []\n for i in range(len(context)):\n c = context[i]\n a = answers[i]\n c_sent = ' '.join(c)\n a_sent = ' '.join(a)\n if _filter(c_sent, a_sent):\n context_return.append(c)\n answers_return.append(a)\n return context_return, answers_return\n\n\ndef save_tokenized_data(train_enc_tokens, train_dec_tokens, save_file_name):\n save_file_path = os.path.join(PROCESSED_PATH, save_file_name)\n pickle.dump((train_enc_tokens, train_dec_tokens, [], []), open(\n save_file_path, 'wb'))\n print('Data saved')\n\n\nif __name__ == '__main__':\n data_files = os.listdir(DATA_PATH)\n asks, ans = [], []\n for idx, file_path in enumerate(data_files):\n convos = read_txt(file_path, ENCODING)\n context, answers = context_answers(convos)\n asks.extend(context)\n ans.extend(answers)\n print('finish {}'.format(file_path))\n print('Total length {}'.format(len(asks)))\n if MULTI:\n print('tokanizing, multi process')\n cores = os.cpu_count() - 2\n p = Pool(cores)\n context = p.map(_basic_tokenizer, asks)\n print('Finish tokenizing ask sentences')\n answers = p.map(_basic_tokenizer, ans)\n print('Finish tokenizing answer sentences')\n p.close()\n p.join()\n else:\n context, 
answers = _tokenized_data(asks, ans)\n print('Total lentgh after tokenization: {}'.format(len(context)))\n context, answers = filter_data(context, answers)\n print('Total lentgh after filtering: {}'.format(len(context)))\n save_tokenized_data(context, answers, 'processed_tokens.p')\n", "step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Dec 20 09:54:08 2017\r\n\r\n@author: chuang\r\n\"\"\"\r\n\r\nimport os \r\nimport pickle \r\n#from collections import Counter\r\n#import user_replace\r\nimport jieba\r\nimport re\r\nfrom multiprocessing import Pool \r\n\r\n#%%\r\n\r\n# parameters for processing the dataset\r\nDATA_PATH = '../data/weibo_single/raw'\r\nUSER_DICT = './userdict.txt'\r\nPROCESSED_PATH = '../data/weibo_single/processed'\r\nENCODING = 'utf-8'\r\njieba.load_userdict(USER_DICT)\r\n\r\nDELETE = ['\\[.*?\\]','\\u200b']\r\nMULTI = True\r\n\r\n#%%\r\ndef replace_tokens(text,replace_dict=None):\r\n# for k,v in replace_dict.items():\r\n# pattern = re.compile(\"|\".join(v)) \r\n# text = pattern.sub(k,text)\r\n \r\n pattern = re.compile(\"|\".join(DELETE)) \r\n text = re.sub(pattern,'',text)\r\n return text\r\n\r\ndef read_txt(file_path,encoding):\r\n with open(os.path.join(DATA_PATH,file_path), 'r',encoding=encoding,errors='replace') as f:\r\n text = f.read()\r\n \r\n text = replace_tokens(text) #,user_replace.replace_dict\r\n convs = text.split('\\n\\n')\r\n lines = [c.split('\\n') for c in convs]\r\n lines = [[i.strip() for i in c if i != ''] for c in lines] ## get ride of empties sentences\r\n lines = [c for c in lines if len(c)>1]\r\n return lines\r\n\r\ndef context_answers(convos):\r\n context,answers = [],[]\r\n for convo in convos:\r\n for index,line in enumerate(convo[:-1]):\r\n context.append(line)\r\n answers.append(convo[index+1])\r\n \r\n assert len(context) == len(answers)\r\n return context,answers\r\n\r\ndef _basic_tokenizer(line,normalize_digits=False):\r\n \"\"\"\r\n A basic tokenizer to tokenize text into tokens\r\n \"\"\" \r\n _DIGIT_RE = re.compile(r\"\\d+\") ## find digits \r\n \r\n words = []\r\n tokens = list(jieba.cut(line.strip().lower()))\r\n if normalize_digits:\r\n for token in tokens:\r\n m = _DIGIT_RE.search(token)\r\n if m is None:\r\n words.append(token)\r\n else:\r\n words.append('_数字_')\r\n else:\r\n words = tokens \r\n \r\n return words \r\n\r\ndef _tokenized_data(context,answers):\r\n \r\n train_enc_tokens = [_basic_tokenizer(t) for t in context]\r\n print('Train_enc_token done.')\r\n \r\n train_dec_tokens = [_basic_tokenizer(t) for t in answers]\r\n print('Train_dec_token done.')\r\n \r\n return train_enc_tokens, train_dec_tokens\r\n\r\ndef _filter(ask_sent,answer_sent):\r\n \r\n if len(ask_sent)<3 or len(answer_sent)<2:\r\n return False \r\n \r\n if \"@\" in ask_sent or \"@\" in answer_sent:\r\n return False\r\n \r\n return True\r\n\r\ndef filter_data(context,answers):\r\n '''\r\n filter some answer that is too short or has @ in it \r\n '''\r\n context_return, answers_return = [],[]\r\n for i in range(len(context)):\r\n c = context[i]\r\n a = answers[i]\r\n c_sent = \" \".join(c)\r\n a_sent = \" \".join(a)\r\n \r\n if _filter(c_sent,a_sent):\r\n context_return.append(c)\r\n answers_return.append(a)\r\n \r\n return context_return,answers_return\r\n\r\ndef save_tokenized_data(train_enc_tokens,train_dec_tokens,save_file_name):\r\n save_file_path = os.path.join(PROCESSED_PATH,save_file_name)\r\n pickle.dump((train_enc_tokens, train_dec_tokens,[],[]),open(save_file_path,'wb'))\r\n print('Data saved')\r\n \r\n#%%\r\nif __name__ == 
\"__main__\":\r\n \r\n data_files = os.listdir(DATA_PATH) ## just do two files for now, too many data \r\n #%%\r\n asks,ans = [],[]\r\n for idx,file_path in enumerate(data_files):\r\n #file_path = 'multi_1_4.data'\r\n convos = read_txt(file_path,ENCODING)\r\n context,answers = context_answers(convos)\r\n \r\n asks.extend(context)\r\n ans.extend(answers)\r\n print('finish {}'.format(file_path))\r\n print('Total length {}'.format(len(asks)))\r\n #%%\r\n if MULTI:\r\n print('tokanizing, multi process')\r\n cores = os.cpu_count()-2 \r\n p = Pool(cores)\r\n context = p.map(_basic_tokenizer,asks)\r\n print('Finish tokenizing ask sentences')\r\n answers = p.map(_basic_tokenizer,ans)\r\n print('Finish tokenizing answer sentences')\r\n p.close()\r\n p.join()\r\n else:\r\n context,answers = _tokenized_data(asks,ans) \r\n \r\n print(\"Total lentgh after tokenization: {}\".format(len(context)))\r\n #%%\r\n context,answers = filter_data(context,answers)\r\n print(\"Total lentgh after filtering: {}\".format(len(context)))\r\n #%%\r\n ## save into pickles\r\n save_tokenized_data(context,answers,'processed_tokens.p')\r\n \r\n #%%\r\n #print(context[:50])\r\n #print(answers[:50])\r\n\r\n\r\n\r\n", "step-ids": [ 6, 7, 10, 11, 12 ] }
[ 6, 7, 10, 11, 12 ]
#=============================================================================== # @author: Daniel V. Stankevich # @organization: RMIT, School of Computer Science, 2012 # # # This package contains representations of the following models: # 'Particle' - an atomic element # 'Swarm' - a set of particles # 'Neighbourhood' - particles topology # 'KnapsackSolution' - representation for solution of the problem # 'TSPSolution' - representation for solution of the problem #=============================================================================== #=============================================================================== # GENERIC MODELS #=============================================================================== #---- Particle representation class ParticleModel: _position = None _velocity = None _bestPosition = None _nbBestPosition = None _fitness = -1 def __init__(self): self._position = None self._velocity = None self._bestPosition = None self._nbBestPosition = None self._fitness = -1 #---- Swarm representation class SwarmModel: _particles = None _neighbourhoods = None _bestPosition = None _bestPositionFitness = -1 def __init__(self): self._particles = [] self._neighbourhoods = None self._bestPosition = None self._bestPositionFitness = -1 #---- Neighbourhood representation class NeighbourhoodModel: _particles = [] _bestPosition = None _bestPositionFitness = -1 def __init__(self, particles): self._particles = particles self._bestPosition = None self._bestPositionFitness = -1 #=============================================================================== # PROBLEM SPECIFIC MODELS #=============================================================================== #---- Knapsack Problem Solution representation class KnapsackSolutionModel: _items = [] _knapsackSize = None def __init__(self, items, size): self._items = items self._knapsackSize = size #---- TSP Problem Solution representation class TSPSolutionModel: _edges = {} _startNode = None _numOfCities = None _bestPath = [] def __init__(self, edges, numOfCities, startNode): self._edges = edges self._numOfCities = numOfCities self._startNode = startNode
normal
{ "blob_id": "5c06229f8e80a7225620f25941cc5276a9021e53", "index": 5353, "step-1": "<mask token>\n\n\nclass SwarmModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n", "step-2": "<mask token>\n\n\nclass SwarmModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n", "step-3": "<mask token>\n\n\nclass SwarmModel:\n _particles = None\n _neighbourhoods = None\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n", "step-4": "class ParticleModel:\n _position = None\n _velocity = None\n _bestPosition = None\n _nbBestPosition = None\n _fitness = -1\n\n def __init__(self):\n self._position = None\n self._velocity = None\n self._bestPosition = None\n self._nbBestPosition = None\n self._fitness = -1\n\n\nclass SwarmModel:\n _particles = None\n _neighbourhoods = None\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n\n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\nclass KnapsackSolutionModel:\n _items = []\n _knapsackSize = None\n\n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n\nclass 
TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n\n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode\n", "step-5": "#===============================================================================\n# @author: Daniel V. Stankevich\n# @organization: RMIT, School of Computer Science, 2012\n#\n#\n# This package contains representations of the following models:\n# 'Particle' - an atomic element\n# 'Swarm' - a set of particles\n# 'Neighbourhood' - particles topology\n# 'KnapsackSolution' - representation for solution of the problem\n# 'TSPSolution' - representation for solution of the problem\n#===============================================================================\n\n\n\n#===============================================================================\n# GENERIC MODELS\n#===============================================================================\n\n#---- Particle representation\nclass ParticleModel:\n _position = None\n _velocity = None\n _bestPosition = None\n _nbBestPosition = None\n _fitness = -1\n\n def __init__(self):\n self._position = None\n self._velocity = None\n self._bestPosition = None\n self._nbBestPosition = None\n self._fitness = -1\n\n#---- Swarm representation\nclass SwarmModel:\n _particles = None\n _neighbourhoods = None\n _bestPosition = None\n _bestPositionFitness = -1\n \n def __init__(self):\n self._particles = []\n self._neighbourhoods = None\n self._bestPosition = None\n self._bestPositionFitness = -1\n \n\n#---- Neighbourhood representation \nclass NeighbourhoodModel:\n _particles = []\n _bestPosition = None\n _bestPositionFitness = -1\n \n def __init__(self, particles):\n self._particles = particles\n self._bestPosition = None\n self._bestPositionFitness = -1\n\n\n#===============================================================================\n# PROBLEM SPECIFIC MODELS\n#===============================================================================\n\n#---- Knapsack Problem Solution representation \nclass KnapsackSolutionModel:\n _items = [] \n _knapsackSize = None\n \n def __init__(self, items, size):\n self._items = items\n self._knapsackSize = size\n\n#---- TSP Problem Solution representation\nclass TSPSolutionModel:\n _edges = {}\n _startNode = None\n _numOfCities = None\n _bestPath = []\n \n def __init__(self, edges, numOfCities, startNode):\n self._edges = edges\n self._numOfCities = numOfCities\n self._startNode = startNode", "step-ids": [ 10, 11, 12, 15, 16 ] }
[ 10, 11, 12, 15, 16 ]
from utilities import SumOneToN, RSS, MSE, R2Score
import numpy as np
import scipy.stats as st

class RidgeLinearModel:
    covariance_matrix = None # covariance matrix of the model coefficients
    covariance_matrix_updated = False
    beta = None # coefficients of the model function
    var_vector = None
    var_vector_updated = False
    CIbeta = None # confidence interval of betas
    CIbeta_updated = False
    x1 = None # first predictor of sample data
    x2 = None # second predictor of sample data
    y = None # responses of sample data
    y_tilde = None # model predictions for x
    y_tilde_updated = False


    def __init__(this, lmb, k):
        this.lmb = lmb # set lambda of model
        this.k = k # set order of polynomial


    # This function fits the model to the sample data
    # using Ridge regression
    #
    # @x1: array containing the first predictor
    # @x2: array containing the second predictor
    # @y: array containing responses
    # @k: the degree of the polynomial to be fitted to the sample data
    # @lmb: lambda, determines the emphasis on minimizing the variance
    # of the model
    #
    def fit(this, x1, x2, y):
        # store x and y for later computations
        this.x1 = x1
        this.x2 = x2
        this.y = y

        # calculate the dimensions of the design matrix
        m = x1.shape[0]
        n = SumOneToN(this.k + 1)

        # allocate design matrix
        this.X = np.ones((m, n))

        # compute values of design matrix
        for i in range(m): # vectorize this loop
            for p in range(this.k):
                for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):
                    this.X[i][SumOneToN(p + 1) + j] *= x1[i]**(p + 1 - j)*x2[i]**j

        # compute linear regression coefficients
        this.beta = np.linalg.pinv(this.X.T.dot(this.X) +
                this.lmb*np.identity(n)).dot(this.X.T).dot(y)

        # stored statistical parameters are no longer valid
        this.set_updated_to_false()


    # Predicts and returns the responses of the predictors with
    # the fitted model if the model is fitted
    #
    # @x1: Column vector containing the first predictor values
    # @x2: Column vector containing the second predictor values
    #
    def predict(this, x1, x2):
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        else:
            # allocate meshgrid filled with constant term
            y = np.ones(x1.shape)*this.beta[0]

            # compute function values
            for p in range(this.k):
                for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):
                    y += this.beta[SumOneToN(p + 1) + j]*x1**(p+1-j)*x2**j

            return y


    # Returns the residuals of the model squared and summed
    def get_RSS(this, x1, x2, y):
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        else:
            y_tilde = this.predict(x1, x2)
            return RSS(y, y_tilde)


    # Returns the mean squared error of the model
    # given the sample data (x1, x2, y)
    #
    # @x1: vector of first predictor
    # @x2: vector of second predictor
    # @y: vector of responses
    #
    def get_MSE(this, x1, x2, y):
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        else:
            y_tilde = this.predict(x1, x2)
            return MSE(y, y_tilde)


    # Returns the R2 score of the model
    def get_R2Score(this, x1, x2, y):
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        else:
            y_tilde = this.predict(x1, x2)
            return R2Score(y, y_tilde)


    # Computes the sample variance of the coefficients of the model
    # by bootstrap resampling
    # @B: The number of bootstrap samples used
    def get_variance_of_betas(this, B=20):
        m = len(this.x1)
        n = SumOneToN(this.k + 1)
        betasamples = np.zeros((n, B))
        for b in range(B):
            # create bootstrap sample
            c = np.random.choice(len(this.x1), len(this.x1))
            s_x1 = this.x1[c]
            s_x2 = this.x2[c]
            s_y = this.y[c]

            # Fix the shape if y is one-dimensional
            if (len(s_y.shape)) == 1:
                s_y = np.expand_dims(this.y[c], axis=1)

            # allocate design matrix
            s_X = np.ones((m, n))

            # compute values of design matrix
            for i in range(m): # vectorize this loop
                for p in range(this.k):
                    for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):
                        s_X[i][SumOneToN(p + 1) + j] *= s_x1[i]**(p + 1 - j)*s_x2[i]**j

            betasamples[:,b] = np.linalg.pinv(s_X.T.dot(s_X) +
                    this.lmb*np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]

        betameans = betasamples.sum(axis=1, keepdims=True)/B

        # Compute variance vector
        this.var_vector = np.sum((betasamples - betameans)**2, axis=1)/B

        return this.var_vector


    # Returns the confidence interval of the betas
    def get_CI_of_beta(this, percentile=.95):
        if this.beta is None:
            print("Error: Model is not fitted.")
            return None
        else:
            if not this.CIbeta_updated:
                # stdcoeff is the z-score of the two-sided confidence interval
                stdcoeff = st.norm.ppf((1-percentile)/2)
                this.CI_beta = np.zeros((len(this.beta), 2))
                for i in range(len(this.beta)):
                    this.CI_beta[i][0] = this.beta[i] + stdcoeff*np.sqrt(this.var_vector[i])
                    this.CI_beta[i][1] = this.beta[i] - stdcoeff*np.sqrt(this.var_vector[i])

                this.CIbeta_updated = True

            # CI_beta is an n x 2 matrix, each row holding the
            # confidence interval of the corresponding beta
            return this.CI_beta


    # Marks all cached statistics as stale after the model is refitted
    def set_updated_to_false(this):
        this.covariance_matrix_updated = False
        this.var_vector_updated = False
        this.y_tilde_updated = False
        this.CIbeta_updated = False
normal
{ "blob_id": "a5dcc66ece4e58995fe86c3a399c45975a596b1a", "index": 5836, "step-1": "<mask token>\n\n\nclass RidgeLinearModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n <mask token>\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n <mask token>\n <mask token>\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-2": "<mask token>\n\n\nclass RidgeLinearModel:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(this, lmb, k):\n this.lmb = lmb\n this.k = k\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n\n def predict(this, x1, x2):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y = np.ones(x1.shape) * this.beta[0]\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j\n ) * x2 ** j\n return y\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = 
np.zeros((n, B))\n for b in range(B):\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n if len(s_y.shape) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n s_X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j\n ) * s_x2[i] ** j\n betasamples[:, b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb *\n np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n betameans = betasamples.sum(axis=1, keepdims=True) / B\n this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B\n return this.var_vector\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-3": "<mask token>\n\n\nclass RidgeLinearModel:\n covariance_matrix = None\n covariance_matrix_updated = False\n beta = None\n var_vector = None\n var_vector_updated = False\n CIbeta = None\n CIbeta_updated = False\n x1 = None\n x2 = None\n y = None\n y_tilde = None\n y_tilde_updated = False\n\n def __init__(this, lmb, k):\n this.lmb = lmb\n this.k = k\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n\n def predict(this, x1, x2):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y = np.ones(x1.shape) * this.beta[0]\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j\n ) * x2 ** j\n return y\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = np.zeros((n, B))\n for b in range(B):\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n if len(s_y.shape) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n s_X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j\n ) * s_x2[i] ** j\n betasamples[:, b] 
= np.linalg.pinv(s_X.T.dot(s_X) + this.lmb *\n np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n betameans = betasamples.sum(axis=1, keepdims=True) / B\n this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B\n return this.var_vector\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-4": "from utilities import SumOneToN, RSS, MSE, R2Score\nimport numpy as np\nimport scipy.stats as st\n\n\nclass RidgeLinearModel:\n covariance_matrix = None\n covariance_matrix_updated = False\n beta = None\n var_vector = None\n var_vector_updated = False\n CIbeta = None\n CIbeta_updated = False\n x1 = None\n x2 = None\n y = None\n y_tilde = None\n y_tilde_updated = False\n\n def __init__(this, lmb, k):\n this.lmb = lmb\n this.k = k\n\n def fit(this, x1, x2, y):\n this.x1 = x1\n this.x2 = x2\n this.y = y\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n this.X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i] ** (p + 1 - j\n ) * x2[i] ** j\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) + this.lmb * np.\n identity(n)).dot(this.X.T).dot(y)\n this.set_updated_to_false()\n\n def predict(this, x1, x2):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y = np.ones(x1.shape) * this.beta[0]\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1) + j] * x1 ** (p + 1 - j\n ) * x2 ** j\n return y\n\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return MSE(y, y_tilde)\n\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print('Error: Model is not fitted.')\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = np.zeros((n, B))\n for b in range(B):\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n if len(s_y.shape) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n s_X = np.ones((m, n))\n for i in range(m):\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i] ** (p + 1 - j\n ) * s_x2[i] ** j\n betasamples[:, b] = np.linalg.pinv(s_X.T.dot(s_X) + this.lmb *\n np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n betameans = betasamples.sum(axis=1, keepdims=True) / B\n this.var_vector = np.sum((betasamples - betameans) ** 2, axis=1) / B\n return this.var_vector\n\n def get_CI_of_beta(this, percentile=0.95):\n if this.beta is None:\n print('Error: Model is not 
fitted.')\n return None\n else:\n if not this.CIbeta_updated:\n stdcoeff = st.norm.ppf((1 - percentile) / 2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff * np.sqrt(this\n .var_vector[i])\n this.CIbeta_updated = True\n return this.CI_beta\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-5": "from utilities import SumOneToN, RSS, MSE, R2Score\nimport numpy as np\nimport scipy.stats as st\n\nclass RidgeLinearModel:\n covariance_matrix = None # covariance matrix of the model coefficients\n covariance_matrix_updated = False\n beta = None # coefficients of the modelfunction\n var_vector = None\n var_vector_updated = False\n CIbeta = None # confidence interval of betas\n CIbeta_updated = False\n x1 = None # first predictor of sampledata\n x2 = None # second predictor of sampledata\n y = None # responses of sampledata\n y_tilde = None # model predictions for x\n y_tilde_updated = False\n\n\n def __init__(this, lmb, k):\n this.lmb = lmb # set lambda of model\n this.k = k # set order of polynomial\n\n\n # This function fits the model to the the sample data\n # using Ridge regression\n #\n # @x: array containing predictors\n # @y: array containing responses\n # @k: the degree of the polynomial to be fitted to the sample data\n # @lmb: lambda, determines the emphasize on minimizing the variance\n # of the model\n #\n def fit(this, x1, x2, y):\n # store x ands y for later computations\n this.x1 = x1\n this.x2 = x2\n this.y = y\n\n # calculate the dimensions of the design matrix\n m = x1.shape[0]\n n = SumOneToN(this.k + 1)\n\n # allocate design matrix\n this.X = np.ones((m, n))\n\n # compute values of design matrix\n for i in range(m): # vectoriser denne løkka\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n this.X[i][SumOneToN(p + 1) + j] *= x1[i]**(p\n + 1 - j)*x2[i]**j\n\n # compute linear regression coefficients\n this.beta = np.linalg.pinv(this.X.T.dot(this.X) +\n this.lmb*np.identity(n)).dot(this.X.T).dot(y)\n\n # stored statistical parameters are no longer valid\n this.set_updated_to_false()\n\n\n # Predicts and returns the responses of the predictors with\n # the fitted model if the model is fitted\n #\n # @x1: Columnvector containing the first predictor values\n # @x2: Columnvector containing the second predictor values\n #\n def predict(this, x1, x2):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n # allocate meshgrid filled with constant term\n y = np.ones(x1.shape)*this.beta[0]\n\n # compute function values\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n y += this.beta[SumOneToN(p + 1)\n + j]*x1**(p+1-j)*x2**j\n\n return y\n\n\n # Returns the residuals of the model squared and summed\n def get_RSS(this, x1, x2, y):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return RSS(y, this.y_tilde)\n\n\n # Returns the mean squared error of the model\n # given the sample data (x1, x2, y)\n #\n # @x1: vector of first predictor\n # @x2: vector of second predictor\n # @y: vector of responses\n #\n def get_MSE(this, x1, x2, y):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n y_tilde = this.predict(x1, x2)\n 
return MSE(y, y_tilde)\n\n\n # Returns the R2 score of the model\n def get_R2Score(this, x1, x2, y):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n y_tilde = this.predict(x1, x2)\n return R2Score(y, y_tilde)\n\n\n # Computes the sample variance of the coefficients of the model\n # @B: The number of samples used\n def get_variance_of_betas(this, B=20):\n m = len(this.x1)\n n = SumOneToN(this.k + 1)\n betasamples = np.zeros((n, B))\n\n for b in range(B):\n # create bootstrapsample\n c = np.random.choice(len(this.x1), len(this.x1))\n s_x1 = this.x1[c]\n s_x2 = this.x2[c]\n s_y = this.y[c]\n # Next line fixes if y is one-dimensional\n if (len(s_y.shape)) == 1:\n s_y = np.expand_dims(this.y[c], axis=1)\n\n # allocate design matrix\n s_X = np.ones((m, n))\n\n # compute values of design matrix\n for i in range(m): # vectoriser denne løkka\n for p in range(this.k):\n for j in range(SumOneToN(p + 2) - SumOneToN(p + 1)):\n s_X[i][SumOneToN(p + 1) + j] *= s_x1[i]**(p\n + 1 - j)*s_x2[i]**j\n\n betasamples[:,b] = np.linalg.pinv(s_X.T.dot(s_X) +\n this.lmb*np.identity(n)).dot(s_X.T).dot(s_y)[:, 0]\n\n betameans = betasamples.sum(axis=1, keepdims=True)/B\n\n # Compute variance vector\n this.var_vector = np.sum((betasamples - betameans)**2, axis=1)/B\n\n return this.var_vector\n\n\n # Returns the confidence interval of the betas\n def get_CI_of_beta(this, percentile=.95):\n if this.beta is None:\n print(\"Error: Model is not fitted.\")\n return None\n else:\n if not this.CIbeta_updated:\n\n # stdcoeff is the z-score to the two-sided confidence interval\n stdcoeff = st.norm.ppf((1-percentile)/2)\n this.CI_beta = np.zeros((len(this.beta), 2))\n for i in range(len(this.beta)):\n this.CI_beta[i][0] = this.beta[i] + stdcoeff*np.sqrt(this.var_vector[i])\n this.CI_beta[i][1] = this.beta[i] - stdcoeff*np.sqrt(this.var_vector[i])\n\n this.CIbeta_updated = True\n # CI_beta returns a nx2 matrix with each row\n # representing the confidence interval to the corresponding beta\n return this.CI_beta\n\n\n def set_updated_to_false(this):\n covariance_matrix_updated = False\n var_vector_updated = False\n y_tilde_updated = False\n CIbeta_updated = False\n", "step-ids": [ 6, 10, 11, 12, 13 ] }
[ 6, 10, 11, 12, 13 ]
import qrcode


def generate_qr(query):
    img = qrcode.make(query)
normal
{ "blob_id": "e97bcf31657317f33f4a138ede80bb9171337f52", "index": 4730, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef generate_qr(query):\n img = qrcode.make(query)\n", "step-3": "import qrcode\n\n\ndef generate_qr(query):\n img = qrcode.make(query)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import platform, sys, os, ensurepip

ensurepip.bootstrap()

try:
    import pip
except ImportError:
    print("Error: Failed to install pip, make sure you are running this script as admin.")
    sys.exit()

arch = platform.architecture()[0]
wheelUrl = "https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/"

print("You are using Python" + str(sys.version_info[0]) + str(sys.version_info[1]) + " " + arch + ".")

if sys.version_info[0] == 2 and sys.version_info[1] == 7:
    if arch == "64bit":
        wheelUrl += "pygame-1.9.2b1-cp27-cp27m-win_amd64.whl"
    else:
        wheelUrl += "pygame-1.9.2b1-cp27-cp27m-win32.whl"
elif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):
    if sys.version_info[1] == 4:
        if arch == "64bit":
            wheelUrl += "pygame-1.9.2b1-cp34-cp34m-win_amd64.whl"
        else:
            wheelUrl += "pygame-1.9.2b1-cp34-cp34m-win32.whl"
    elif sys.version_info[1] == 5:
        if arch == "64bit":
            wheelUrl += "pygame-1.9.2b1-cp35-cp35m-win_amd64.whl"
        else:
            wheelUrl += "pygame-1.9.2b1-cp35-cp35m-win32.whl"
    elif sys.version_info[1] == 6:
        if arch == "64bit":
            wheelUrl += "pygame-1.9.2b8-cp36-cp36m-win_amd64.whl"
        else:
            wheelUrl += "pygame-1.9.2b8-cp36-cp36m-win32.whl"
else:
    print("Pygame only supports Python 27, 34, 35 and 36.")
    sys.exit()

if pip.main(["install", wheelUrl]) == 0:
    print("Pygame should now be installed.")
else:
    print("Something went wrong during the installation of pygame.")

os.system("pause")
normal
{ "blob_id": "b44f75db652b3a40cd9475bfe44027724e845252", "index": 1146, "step-1": "<mask token>\n", "step-2": "<mask token>\nensurepip.bootstrap()\ntry:\n import pip\nexcept ImportError:\n print(\n 'Error: Failed to install pip, make sure you are running this script as admin.'\n )\n sys.exit()\n<mask token>\nprint('You are using Python' + str(sys.version_info[0]) + str(sys.\n version_info[1]) + ' ' + arch + '.')\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\n if sys.version_info[1] == 4:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'\n elif sys.version_info[1] == 5:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'\n elif sys.version_info[1] == 6:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'\nelse:\n print('Pygame only supports Python 27, 34, 35 and 36.')\n sys.exit()\nif pip.main(['install', wheelUrl]) == 0:\n print('Pygame should now be installed.')\nelse:\n print('Something went wrong during the installation of pygame.')\nos.system('pause')\n", "step-3": "<mask token>\nensurepip.bootstrap()\ntry:\n import pip\nexcept ImportError:\n print(\n 'Error: Failed to install pip, make sure you are running this script as admin.'\n )\n sys.exit()\narch = platform.architecture()[0]\nwheelUrl = (\n 'https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/'\n )\nprint('You are using Python' + str(sys.version_info[0]) + str(sys.\n version_info[1]) + ' ' + arch + '.')\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\n if sys.version_info[1] == 4:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'\n elif sys.version_info[1] == 5:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'\n elif sys.version_info[1] == 6:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'\nelse:\n print('Pygame only supports Python 27, 34, 35 and 36.')\n sys.exit()\nif pip.main(['install', wheelUrl]) == 0:\n print('Pygame should now be installed.')\nelse:\n print('Something went wrong during the installation of pygame.')\nos.system('pause')\n", "step-4": "import platform, sys, os, ensurepip\nensurepip.bootstrap()\ntry:\n import pip\nexcept ImportError:\n print(\n 'Error: Failed to install pip, make sure you are running this script as admin.'\n )\n sys.exit()\narch = platform.architecture()[0]\nwheelUrl = (\n 'https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/'\n )\nprint('You are using Python' + str(sys.version_info[0]) + str(sys.\n version_info[1]) + ' ' + arch + '.')\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'\n else:\n wheelUrl += 
'pygame-1.9.2b1-cp27-cp27m-win32.whl'\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\n if sys.version_info[1] == 4:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'\n elif sys.version_info[1] == 5:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'\n elif sys.version_info[1] == 6:\n if arch == '64bit':\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'\n else:\n wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'\nelse:\n print('Pygame only supports Python 27, 34, 35 and 36.')\n sys.exit()\nif pip.main(['install', wheelUrl]) == 0:\n print('Pygame should now be installed.')\nelse:\n print('Something went wrong during the installation of pygame.')\nos.system('pause')\n", "step-5": "import platform, sys, os, ensurepip\r\n\nensurepip.bootstrap()\n\ntry:\n\timport pip\nexcept ImportError:\n\tprint(\"Error: Failed to install pip, make sure you are running this script as admin.\")\n\tsys.exit()\n\narch = platform.architecture()[0]\r\nwheelUrl = \"https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/\"\r\n\r\nprint(\"You are using Python\" + str(sys.version_info[0]) + str(sys.version_info[1]) + \" \" + arch + \".\")\r\n\r\nif sys.version_info[0] == 2 and sys.version_info[1] == 7:\r\n\tif arch == \"64bit\":\r\n\t\twheelUrl += \"pygame-1.9.2b1-cp27-cp27m-win_amd64.whl\"\r\n\telse:\r\n\t\twheelUrl += \"pygame-1.9.2b1-cp27-cp27m-win32.whl\"\r\nelif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):\r\n\tif sys.version_info[1] == 4:\r\n\t\tif arch == \"64bit\":\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp34-cp34m-win_amd64.whl\"\r\n\t\telse:\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp34-cp34m-win32.whl\"\r\n\telif sys.version_info[1] == 5:\r\n\t\tif arch == \"64bit\":\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp35-cp35m-win_amd64.whl\"\r\n\t\telse:\r\n\t\t\twheelUrl += \"pygame-1.9.2b1-cp35-cp35m-win32.whl\"\r\n\telif sys.version_info[1] == 6:\r\n\t\tif arch == \"64bit\":\r\n\t\t\twheelUrl += \"pygame-1.9.2b8-cp36-cp36m-win_amd64.whl\"\r\n\t\telse:\r\n\t\t\twheelUrl += \"pygame-1.9.2b8-cp36-cp36m-win32.whl\"\r\nelse:\r\n\tprint(\"Pygame only supports Python 27, 34, 35 and 36.\")\r\n\tsys.exit()\r\n\r\nif pip.main([\"install\", wheelUrl]) == 0:\r\n\tprint(\"Pygame should now be installed.\")\r\nelse:\r\n\tprint(\"Something went wrong during the installation of pygame.\")\r\n\r\nos.system(\"pause\")\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import torch
import torch_scatter
import torchgraphs as tg
import textwrap
from . import autograd_tricks as lrp


def patch():
    torch.add = lrp.add
    torch.cat = lrp.cat
    torch.index_select = lrp.index_select
    tg.utils.repeat_tensor = lrp.repeat_tensor
    torch_scatter.scatter_add = lrp.scatter_add
    torch_scatter.scatter_mean = lrp.scatter_mean
    torch_scatter.scatter_max = lrp.scatter_max
    torch.nn.functional.linear = lrp.linear_eps


def computational_graph(op):
    if op is None:
        return 'None'
    res = f'{op.__class__.__name__} at {hex(id(op))}:'
    if op.__class__.__name__ == 'AccumulateGrad':
        res += f'variable at {hex(id(op.variable))}'
    for op in op.next_functions:
        res += '\n-' + textwrap.indent(computational_graph(op[0]), ' ')
    return res
normal
{ "blob_id": "faafc7cfd900d3f6fd6df30af5580f71eecfb279", "index": 8298, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef computational_graph(op):\n if op is None:\n return 'None'\n res = f'{op.__class__.__name__} at {hex(id(op))}:'\n if op.__class__.__name__ == 'AccumulateGrad':\n res += f'variable at {hex(id(op.variable))}'\n for op in op.next_functions:\n res += '\\n-' + textwrap.indent(computational_graph(op[0]), ' ')\n return res\n", "step-3": "<mask token>\n\n\ndef patch():\n torch.add = lrp.add\n torch.cat = lrp.cat\n torch.index_select = lrp.index_select\n tg.utils.repeat_tensor = lrp.repeat_tensor\n torch_scatter.scatter_add = lrp.scatter_add\n torch_scatter.scatter_mean = lrp.scatter_mean\n torch_scatter.scatter_max = lrp.scatter_max\n torch.nn.functional.linear = lrp.linear_eps\n\n\ndef computational_graph(op):\n if op is None:\n return 'None'\n res = f'{op.__class__.__name__} at {hex(id(op))}:'\n if op.__class__.__name__ == 'AccumulateGrad':\n res += f'variable at {hex(id(op.variable))}'\n for op in op.next_functions:\n res += '\\n-' + textwrap.indent(computational_graph(op[0]), ' ')\n return res\n", "step-4": "import torch\nimport torch_scatter\nimport torchgraphs as tg\nimport textwrap\nfrom . import autograd_tricks as lrp\n\n\ndef patch():\n torch.add = lrp.add\n torch.cat = lrp.cat\n torch.index_select = lrp.index_select\n tg.utils.repeat_tensor = lrp.repeat_tensor\n torch_scatter.scatter_add = lrp.scatter_add\n torch_scatter.scatter_mean = lrp.scatter_mean\n torch_scatter.scatter_max = lrp.scatter_max\n torch.nn.functional.linear = lrp.linear_eps\n\n\ndef computational_graph(op):\n if op is None:\n return 'None'\n res = f'{op.__class__.__name__} at {hex(id(op))}:'\n if op.__class__.__name__ == 'AccumulateGrad':\n res += f'variable at {hex(id(op.variable))}'\n for op in op.next_functions:\n res += '\\n-' + textwrap.indent(computational_graph(op[0]), ' ')\n return res\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-07 23:42
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('events', '0005_auto_20160207_1529'),
    ]

    operations = [
        migrations.AddField(
            model_name='event',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AddField(
            model_name='eventtemplate',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AddField(
            model_name='historicalevent',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AddField(
            model_name='historicaleventtemplate',
            name='skins_type',
            field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),
        ),
        migrations.AlterField(
            model_name='event',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='event',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
        migrations.AlterField(
            model_name='eventtemplate',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='eventtemplate',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
        migrations.AlterField(
            model_name='historicalevent',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='historicalevent',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
        migrations.AlterField(
            model_name='historicaleventtemplate',
            name='event_type',
            field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),
        ),
        migrations.AlterField(
            model_name='historicaleventtemplate',
            name='scoring',
            field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),
        ),
    ]
normal
{ "blob_id": "ab3609c27fa002d79735c5d5c09ec7a52fedd040", "index": 3484, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('events', '0005_auto_20160207_1529')]\n operations = [migrations.AddField(model_name='event', name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),\n ('N', 'No Skins')], default='N', max_length=1, verbose_name=\n 'Skins type')), migrations.AddField(model_name='eventtemplate',\n name='skins_type', field=models.CharField(choices=[('I',\n 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',\n max_length=1, verbose_name='Skins type')), migrations.AddField(\n model_name='historicalevent', name='skins_type', field=models.\n CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',\n 'No Skins')], default='N', max_length=1, verbose_name='Skins type')\n ), migrations.AddField(model_name='historicaleventtemplate', name=\n 'skins_type', field=models.CharField(choices=[('I', 'Individual'),\n ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,\n verbose_name='Skins type')), migrations.AlterField(model_name=\n 'event', name='event_type', field=models.CharField(choices=[('L',\n 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),\n ('O', 'Other')], default='M', max_length=1, verbose_name=\n 'Event type')), migrations.AlterField(model_name='event', name=\n 'scoring', field=models.CharField(choices=[('IN', 'Individual'), (\n 'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name='eventtemplate',\n name='event_type', field=models.CharField(choices=[('L', 'League'),\n ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',\n 'Other')], default='M', max_length=1, verbose_name='Event type')),\n migrations.AlterField(model_name='eventtemplate', name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB',\n 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name=\n 'historicalevent', name='event_type', field=models.CharField(\n choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',\n 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length\n =1, verbose_name='Event type')), migrations.AlterField(model_name=\n 'historicalevent', name='scoring', field=models.CharField(choices=[\n ('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',\n 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='event_type', field=\n models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),\n ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',\n max_length=1, verbose_name='Event type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='scoring', field=models.\n CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),\n ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: 
Combination')], default='IN',\n max_length=3, verbose_name='Scoring type'))]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('events', '0005_auto_20160207_1529')]\n operations = [migrations.AddField(model_name='event', name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'),\n ('N', 'No Skins')], default='N', max_length=1, verbose_name=\n 'Skins type')), migrations.AddField(model_name='eventtemplate',\n name='skins_type', field=models.CharField(choices=[('I',\n 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N',\n max_length=1, verbose_name='Skins type')), migrations.AddField(\n model_name='historicalevent', name='skins_type', field=models.\n CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N',\n 'No Skins')], default='N', max_length=1, verbose_name='Skins type')\n ), migrations.AddField(model_name='historicaleventtemplate', name=\n 'skins_type', field=models.CharField(choices=[('I', 'Individual'),\n ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1,\n verbose_name='Skins type')), migrations.AlterField(model_name=\n 'event', name='event_type', field=models.CharField(choices=[('L',\n 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'),\n ('O', 'Other')], default='M', max_length=1, verbose_name=\n 'Event type')), migrations.AlterField(model_name='event', name=\n 'scoring', field=models.CharField(choices=[('IN', 'Individual'), (\n 'TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name='eventtemplate',\n name='event_type', field=models.CharField(choices=[('L', 'League'),\n ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O',\n 'Other')], default='M', max_length=1, verbose_name='Event type')),\n migrations.AlterField(model_name='eventtemplate', name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB',\n 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS',\n 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC',\n 'Team: Combination')], default='IN', max_length=3, verbose_name=\n 'Scoring type')), migrations.AlterField(model_name=\n 'historicalevent', name='event_type', field=models.CharField(\n choices=[('L', 'League'), ('M', 'Weekend Major'), ('H',\n 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length\n =1, verbose_name='Event type')), migrations.AlterField(model_name=\n 'historicalevent', name='scoring', field=models.CharField(choices=[\n ('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG',\n 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, verbose_name='Scoring type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='event_type', field=\n models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'),\n ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M',\n max_length=1, verbose_name='Event type')), migrations.AlterField(\n model_name='historicaleventtemplate', name='scoring', field=models.\n CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'),\n ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA',\n 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN',\n max_length=3, 
verbose_name='Scoring type'))]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.2 on 2016-02-07 23:42\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('events', '0005_auto_20160207_1529'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='event',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='eventtemplate',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='historicalevent',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AddField(\n model_name='historicaleventtemplate',\n name='skins_type',\n field=models.CharField(choices=[('I', 'Individual'), ('T', 'Team'), ('N', 'No Skins')], default='N', max_length=1, verbose_name='Skins type'),\n ),\n migrations.AlterField(\n model_name='event',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='event',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='eventtemplate',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='eventtemplate',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='historicalevent',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='historicalevent',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n migrations.AlterField(\n model_name='historicaleventtemplate',\n name='event_type',\n field=models.CharField(choices=[('L', 'League'), ('M', 'Weekend Major'), ('H', 'Holiday Pro-shop Event'), ('O', 'Other')], default='M', max_length=1, verbose_name='Event type'),\n ),\n migrations.AlterField(\n model_name='historicaleventtemplate',\n name='scoring',\n field=models.CharField(choices=[('IN', 'Individual'), ('TBB', 'Team: Best Ball'), ('TAG', 'Team: Aggregate Score'), ('TS', 'Team: Scramble'), ('TA', 'Team: Alternate 
Shot'), ('TC', 'Team: Combination')], default='IN', max_length=3, verbose_name='Scoring type'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import pandas as pd
import numpy as np


class LabeledArray:

    @staticmethod
    def get_label_for_indexes_upto(input_data, input_label, input_index):
        df_input_data = pd.DataFrame(input_data)
        df_labels = pd.DataFrame(input_label)
        df_data_labels = pd.concat([df_input_data, df_labels], axis=1)
        df_data_labels.columns = ['input_data', 'input_label']
        df_data_labels.sort_values(by=['input_data'], ascending=True, inplace=True)
        return np.array(df_data_labels.iloc[:, 1].head(input_index))
normal
{ "blob_id": "0dea8675d8050a91c284a13bcbce6fd0943b604e", "index": 5135, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass LabeledArray:\n <mask token>\n", "step-3": "<mask token>\n\n\nclass LabeledArray:\n\n @staticmethod\n def get_label_for_indexes_upto(input_data, input_label, input_index):\n df_input_data = pd.DataFrame(input_data)\n df_labels = pd.DataFrame(input_label)\n df_data_labels = pd.concat([df_input_data, df_labels], axis=1)\n df_data_labels.columns = ['input_data', 'input_label']\n df_data_labels.sort_values(by=['input_data'], ascending=True,\n inplace=True)\n return np.array(df_data_labels.iloc[:, 1].head(input_index))\n", "step-4": "import pandas as pd\nimport numpy as np\n\n\nclass LabeledArray:\n\n @staticmethod\n def get_label_for_indexes_upto(input_data, input_label, input_index):\n df_input_data = pd.DataFrame(input_data)\n df_labels = pd.DataFrame(input_label)\n df_data_labels = pd.concat([df_input_data, df_labels], axis=1)\n df_data_labels.columns = ['input_data', 'input_label']\n df_data_labels.sort_values(by=['input_data'], ascending=True,\n inplace=True)\n return np.array(df_data_labels.iloc[:, 1].head(input_index))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from abc import abstractmethod

from anoncreds.protocol.repo.public_repo import PublicRepo
from anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, \
    RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, \
    RevocationSecretKey, AccumulatorSecretKey, \
    TimestampType
from anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory


class IssuerWallet(Wallet):
    def __init__(self, claimDefId, repo: PublicRepo):
        Wallet.__init__(self, claimDefId, repo)

    # SUBMIT

    @abstractmethod
    async def submitClaimDef(self, claimDef: ClaimDefinition) -> ClaimDefinition:
        raise NotImplementedError

    @abstractmethod
    async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,
                               pkR: RevocationPublicKey = None) -> (PublicKey, RevocationPublicKey):
        raise NotImplementedError

    @abstractmethod
    async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,
                               skR: RevocationSecretKey = None):
        raise NotImplementedError

    @abstractmethod
    async def submitAccumPublic(self, claimDefId: ID, accumPK: AccumulatorPublicKey,
                                accum: Accumulator, tails: TailsType):
        raise NotImplementedError

    @abstractmethod
    async def submitAccumSecret(self, claimDefId: ID, accumSK: AccumulatorSecretKey):
        raise NotImplementedError

    @abstractmethod
    async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
                                timestampMs: TimestampType):
        raise NotImplementedError

    @abstractmethod
    async def submitContextAttr(self, claimDefId: ID, m2):
        raise NotImplementedError

    # GET

    @abstractmethod
    async def getSecretKey(self, claimDefId: ID) -> SecretKey:
        raise NotImplementedError

    @abstractmethod
    async def getSecretKeyRevocation(self, claimDefId: ID) -> RevocationSecretKey:
        raise NotImplementedError

    @abstractmethod
    async def getSecretKeyAccumulator(self, claimDefId: ID) -> AccumulatorSecretKey:
        raise NotImplementedError

    @abstractmethod
    async def getContextAttr(self, claimDefId: ID):
        raise NotImplementedError


class IssuerWalletInMemory(IssuerWallet, WalletInMemory):
    def __init__(self, claimDefId, repo: PublicRepo):
        WalletInMemory.__init__(self, claimDefId, repo)

        # other dicts with key=claimDefKey
        self._sks = {}
        self._skRs = {}
        self._accumSks = {}
        self._m2s = {}
        self._attributes = {}

    # SUBMIT

    async def submitClaimDef(self, claimDef: ClaimDefinition) -> ClaimDefinition:
        claimDef = await self._repo.submitClaimDef(claimDef)
        self._cacheClaimDef(claimDef)
        return claimDef

    async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,
                               pkR: RevocationPublicKey = None) -> (PublicKey, RevocationPublicKey):
        pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)
        await self._cacheValueForId(self._pks, claimDefId, pk)
        if pkR:
            await self._cacheValueForId(self._pkRs, claimDefId, pkR)
        return pk, pkR

    async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,
                               skR: RevocationSecretKey = None):
        await self._cacheValueForId(self._sks, claimDefId, sk)
        if skR:
            await self._cacheValueForId(self._skRs, claimDefId, skR)

    async def submitAccumPublic(self, claimDefId: ID, accumPK: AccumulatorPublicKey,
                                accum: Accumulator, tails: TailsType) -> AccumulatorPublicKey:
        accumPK = await self._repo.submitAccumulator(claimDefId, accumPK, accum, tails)
        await self._cacheValueForId(self._accums, claimDefId, accum)
        await self._cacheValueForId(self._accumPks, claimDefId, accumPK)
        await self._cacheValueForId(self._tails, claimDefId, tails)
        return accumPK

    async def submitAccumSecret(self, claimDefId: ID, accumSK: AccumulatorSecretKey):
        await self._cacheValueForId(self._accumSks, claimDefId, accumSK)

    async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,
                                timestampMs: TimestampType):
        await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)
        await self._cacheValueForId(self._accums, claimDefId, accum)

    async def submitContextAttr(self, claimDefId: ID, m2):
        await self._cacheValueForId(self._m2s, claimDefId, m2)

    # GET

    async def getSecretKey(self, claimDefId: ID) -> SecretKey:
        return await self._getValueForId(self._sks, claimDefId)

    async def getSecretKeyRevocation(self, claimDefId: ID) -> RevocationSecretKey:
        return await self._getValueForId(self._skRs, claimDefId)

    async def getSecretKeyAccumulator(self, claimDefId: ID) -> AccumulatorSecretKey:
        return await self._getValueForId(self._accumSks, claimDefId)

    async def getContextAttr(self, claimDefId: ID):
        return await self._getValueForId(self._m2s, claimDefId)
normal
{ "blob_id": "890841c8892e89375bb022f0d469fefc27414a2b", "index": 5823, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType\n ) ->AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,\n accum, tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n\n async def submitContextAttr(self, claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n", "step-3": "<mask token>\n\n\nclass IssuerWallet(Wallet):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n Wallet.__init__(self, claimDefId, repo)\n\n @abstractmethod\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n raise NotImplementedError\n\n @abstractmethod\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n raise 
NotImplementedError\n\n @abstractmethod\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitContextAttr(self, claimDefId: ID, m2):\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getContextAttr(self, claimDefId: ID):\n raise NotImplementedError\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType\n ) ->AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,\n accum, tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n\n async def submitContextAttr(self, claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n", "step-4": "from abc import abstractmethod\nfrom anoncreds.protocol.repo.public_repo import PublicRepo\nfrom anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, RevocationSecretKey, AccumulatorSecretKey, 
TimestampType\nfrom anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory\n\n\nclass IssuerWallet(Wallet):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n Wallet.__init__(self, claimDefId, repo)\n\n @abstractmethod\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n raise NotImplementedError\n\n @abstractmethod\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitContextAttr(self, claimDefId: ID, m2):\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getContextAttr(self, claimDefId: ID):\n raise NotImplementedError\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n async def submitClaimDef(self, claimDef: ClaimDefinition\n ) ->ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey, pkR:\n RevocationPublicKey=None) ->(PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey, skR:\n RevocationSecretKey=None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID, accumPK:\n AccumulatorPublicKey, accum: Accumulator, tails: TailsType\n ) ->AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK,\n accum, tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID, accumSK:\n AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, 
claimDefId, accum)\n\n async def submitContextAttr(self, claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n async def getSecretKey(self, claimDefId: ID) ->SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self, claimDefId: ID\n ) ->RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self, claimDefId: ID\n ) ->AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n", "step-5": "from abc import abstractmethod\n\nfrom anoncreds.protocol.repo.public_repo import PublicRepo\nfrom anoncreds.protocol.types import ClaimDefinition, PublicKey, SecretKey, ID, \\\n RevocationPublicKey, AccumulatorPublicKey, Accumulator, TailsType, \\\n RevocationSecretKey, AccumulatorSecretKey, \\\n TimestampType\nfrom anoncreds.protocol.wallet.wallet import Wallet, WalletInMemory\n\n\nclass IssuerWallet(Wallet):\n def __init__(self, claimDefId, repo: PublicRepo):\n Wallet.__init__(self, claimDefId, repo)\n\n # SUBMIT\n\n @abstractmethod\n async def submitClaimDef(self,\n claimDef: ClaimDefinition) -> ClaimDefinition:\n raise NotImplementedError\n\n @abstractmethod\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,\n pkR: RevocationPublicKey = None) -> (\n PublicKey, RevocationPublicKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,\n skR: RevocationSecretKey = None):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumPublic(self, claimDefId: ID,\n accumPK: AccumulatorPublicKey,\n accum: Accumulator, tails: TailsType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumSecret(self, claimDefId: ID,\n accumSK: AccumulatorSecretKey):\n raise NotImplementedError\n\n @abstractmethod\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n raise NotImplementedError\n\n @abstractmethod\n async def submitContextAttr(self, claimDefId: ID, m2):\n raise NotImplementedError\n\n # GET\n\n @abstractmethod\n async def getSecretKey(self, claimDefId: ID) -> SecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyRevocation(self,\n claimDefId: ID) -> RevocationSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getSecretKeyAccumulator(self,\n claimDefId: ID) -> AccumulatorSecretKey:\n raise NotImplementedError\n\n @abstractmethod\n async def getContextAttr(self, claimDefId: ID):\n raise NotImplementedError\n\n\nclass IssuerWalletInMemory(IssuerWallet, WalletInMemory):\n def __init__(self, claimDefId, repo: PublicRepo):\n WalletInMemory.__init__(self, claimDefId, repo)\n\n # other dicts with key=claimDefKey\n self._sks = {}\n self._skRs = {}\n self._accumSks = {}\n self._m2s = {}\n self._attributes = {}\n\n # SUBMIT\n\n async def submitClaimDef(self,\n claimDef: ClaimDefinition) -> ClaimDefinition:\n claimDef = await self._repo.submitClaimDef(claimDef)\n self._cacheClaimDef(claimDef)\n return claimDef\n\n async def submitPublicKeys(self, claimDefId: ID, pk: PublicKey,\n pkR: RevocationPublicKey = None) -> (\n PublicKey, RevocationPublicKey):\n pk, pkR = await self._repo.submitPublicKeys(claimDefId, pk, pkR)\n await self._cacheValueForId(self._pks, claimDefId, pk)\n if pkR:\n await self._cacheValueForId(self._pkRs, claimDefId, pkR)\n 
return pk, pkR\n\n async def submitSecretKeys(self, claimDefId: ID, sk: SecretKey,\n skR: RevocationSecretKey = None):\n await self._cacheValueForId(self._sks, claimDefId, sk)\n if skR:\n await self._cacheValueForId(self._skRs, claimDefId, skR)\n\n async def submitAccumPublic(self, claimDefId: ID,\n accumPK: AccumulatorPublicKey,\n accum: Accumulator,\n tails: TailsType) -> AccumulatorPublicKey:\n accumPK = await self._repo.submitAccumulator(claimDefId, accumPK, accum,\n tails)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n await self._cacheValueForId(self._accumPks, claimDefId, accumPK)\n await self._cacheValueForId(self._tails, claimDefId, tails)\n return accumPK\n\n async def submitAccumSecret(self, claimDefId: ID,\n accumSK: AccumulatorSecretKey):\n await self._cacheValueForId(self._accumSks, claimDefId, accumSK)\n\n async def submitAccumUpdate(self, claimDefId: ID, accum: Accumulator,\n timestampMs: TimestampType):\n await self._repo.submitAccumUpdate(claimDefId, accum, timestampMs)\n await self._cacheValueForId(self._accums, claimDefId, accum)\n\n async def submitContextAttr(self, claimDefId: ID, m2):\n await self._cacheValueForId(self._m2s, claimDefId, m2)\n\n # GET\n\n async def getSecretKey(self, claimDefId: ID) -> SecretKey:\n return await self._getValueForId(self._sks, claimDefId)\n\n async def getSecretKeyRevocation(self,\n claimDefId: ID) -> RevocationSecretKey:\n return await self._getValueForId(self._skRs, claimDefId)\n\n async def getSecretKeyAccumulator(self,\n claimDefId: ID) -> AccumulatorSecretKey:\n return await self._getValueForId(self._accumSks, claimDefId)\n\n async def getContextAttr(self, claimDefId: ID):\n return await self._getValueForId(self._m2s, claimDefId)\n", "step-ids": [ 0, 2, 4, 5, 6 ] }
[ 0, 2, 4, 5, 6 ]
""" AlbumInfo-related frames for the Album view. """ from __future__ import annotations import logging from typing import TYPE_CHECKING, Iterator, Collection, Any from ds_tools.caching.decorators import cached_property from tk_gui.elements import Element, HorizontalSeparator, Multiline, Text, Input, Image, Spacer from tk_gui.elements.buttons import Button, EventButton as EButton from tk_gui.elements.choices import ListBox, CheckBox, Combo from tk_gui.elements.frame import InteractiveFrame, Frame, BasicRowFrame from tk_gui.elements.menu import Menu, MenuItem from tk_gui.elements.rating import Rating from tk_gui.popups import pick_file_popup from music.common.disco_entry import DiscoEntryType from music.files import SongFile from music.manager.update import TrackInfo, AlbumInfo from ..utils import AlbumIdentifier, TrackIdentifier, get_album_info, get_album_dir, get_track_info, get_track_file from .helpers import IText from .images import AlbumCoverImageBuilder from .list_box import EditableListBox if TYPE_CHECKING: from tk_gui.typing import Layout, Bool, XY __all__ = ['AlbumInfoFrame', 'TrackInfoFrame'] log = logging.getLogger(__name__) ValueEle = Text | Multiline | Rating | ListBox | Combo | EditableListBox | Input LRG_FONT = ('Helvetica', 20) class TagModMixin: _tag_vals_and_eles: dict[str, tuple[Any, ValueEle]] def _iter_changes(self) -> Iterator[tuple[str, ValueEle, Any, Any]]: for key, (original_val, val_ele) in self._tag_vals_and_eles.items(): if (value := val_ele.value) != original_val: yield key, val_ele, original_val, value def reset_tag_values(self): for key, val_ele, original_val, value in self._iter_changes(): match val_ele: case ListBox() | EditableListBox(): val_ele.update(choices=original_val, replace=True, select=True) case _: # Input() | Text() | CheckBox() | Combo() | Rating() val_ele.update(original_val) def get_modified(self) -> dict[str, tuple[Any, Any]]: return {key: (original_val, value) for key, val_ele, original_val, value in self._iter_changes()} class AlbumInfoFrame(TagModMixin, InteractiveFrame): album_info: AlbumInfo def __init__(self, album: AlbumIdentifier, cover_size: XY = (250, 250), **kwargs): super().__init__(**kwargs) self.album_info = get_album_info(album) self.album_dir = get_album_dir(album) self.cover_size = cover_size self._tag_vals_and_eles = {} # region Layout Generation def get_custom_layout(self) -> Layout: yield from self.build_meta_rows() yield [self.cover_image_frame, TagFrame([*self.build_tag_rows()], disabled=self.disabled)] yield [HorizontalSeparator()] yield from self.build_buttons() def build_meta_rows(self): data = {'bitrate_str': set(), 'sample_rate_str': set(), 'bits_per_sample': set()} for track in self.album_dir: info = track.info for key, values in data.items(): if value := info[key]: values.add(str(value)) data = {key: ' / '.join(sorted(values)) for key, values in data.items()} yield [ Text('Bitrate:'), IText(data['bitrate_str'], size=(18, 1)), Text('Sample Rate:'), IText(data['sample_rate_str'], size=(18, 1)), Text('Bit Depth:'), IText(data['bits_per_sample'], size=(18, 1)), ] yield [HorizontalSeparator()] def build_tag_rows(self): tooltips = { 'name': 'The name that was / should be used for the album directory', 'parent': 'The name that was / should be used for the artist directory', 'singer': 'Solo singer of a group, when the album should be sorted under their group', 'solo_of_group': 'Whether the singer is a soloist', } disabled = self.disabled for key, value in self.album_info.to_dict(skip={'tracks'}, 
genres_as_set=True).items(): if tooltip := tooltips.get(key): kwargs = {'tooltip': tooltip} else: kwargs = {} key_ele = label_ele(key, **kwargs) if key == 'type': types = [de.real_name for de in DiscoEntryType] if value: if isinstance(value, DiscoEntryType): value = value.real_name elif value not in types: types.append(value) val_ele = Combo( types, value, size=(48, None), disabled=disabled, key=key, change_cb=self._update_numbered_type ) elif key == 'genre': val_ele = _genre_list_box(value, self.album_info, disabled, key=key) elif key in {'mp4', 'solo_of_group'}: kwargs['disabled'] = True if key == 'mp4' else disabled val_ele = CheckBox('', default=value, pad=(0, 0), key=key, **kwargs) else: if key.startswith('wiki_'): kwargs['link'] = True elif key == 'number': kwargs['change_cb'] = self._update_numbered_type value = _normalize_input_value(value) val_ele = Input(value, size=(50, 1), disabled=disabled, key=key, **kwargs) self._tag_vals_and_eles[key] = (value, val_ele) yield [key_ele, val_ele] @cached_property def cover_image_frame(self) -> Frame: class ImageMenu(Menu): MenuItem('Replace', callback=self._replace_cover_image, enabled=lambda me: not self.disabled) # TODO: Include get_wiki_cover_choice? cover_builder = AlbumCoverImageBuilder(self.album_info, self.cover_size) return cover_builder.make_thumbnail_frame(right_click_menu=ImageMenu()) # endregion # region Layout Generation - Buttons def build_buttons(self) -> Layout: # These frames need to be in the same row for them to occupy the same space when visible yield [self.view_buttons_frame, self.edit_buttons_frame] @cached_property def view_buttons_frame(self) -> Frame: rows = [[BasicRowFrame(row, side='t')] for row in self._build_view_buttons()] return Frame(rows, visible=self.disabled, side='t') def _build_view_buttons(self) -> Iterator[list[Button]]: # noqa kwargs = {'size': (18, 1), 'borderwidth': 3} yield [ EButton('Clean & Add BPM', key='clean_and_add_bpm', **kwargs), EButton('View All Tags', key='view_all_tags', **kwargs), EButton('Edit', key='edit_album', **kwargs), EButton('Wiki Update', key='wiki_update', **kwargs), ] kwargs['size'] = (25, 1) # TODO: Handle replacing inferior versions in real destination directory yield [ # EButton('Sync Ratings Between Albums', key='sync_album_ratings', disabled=True, **kwargs), EButton('Sort Into Library', key='sort_into_library', **kwargs), # EButton('Copy Tags Between Albums', key='copy_album_tags', disabled=True, **kwargs), ] yield [ EButton('Copy Tags To Album...', key='copy_src_album_tags', **kwargs), EButton('Copy Tags From Album...', key='copy_dst_album_tags', **kwargs), ] # TODO: Unify the above/below rows / shorten text / merge functionality with the sort view yield [ EButton('Copy Tags To Lib Album...', key='copy_src_lib_album_tags', **kwargs), EButton('Copy Tags From Lib Album...', key='copy_dst_lib_album_tags', **kwargs), ] open_btn = EButton('\U0001f5c1', key='open', font=LRG_FONT, size=(10, 1), tooltip='Open Album', borderwidth=3) album_dir = self.album_dir # TODO: handle: music.files.exceptions.InvalidAlbumDir: Invalid album dir - contains directories if len(album_dir.parent) > 1: kwargs = dict(font=LRG_FONT, size=(5, 1), borderwidth=3) yield [ EButton('\u2190', key='prev_dir', **kwargs) if album_dir.has_prev_sibling else Spacer(size=(90, 56)), open_btn, EButton('\u2192', key='next_dir', **kwargs) if album_dir.has_next_sibling else Spacer(size=(90, 56)), ] else: yield [open_btn] @cached_property def edit_buttons_frame(self) -> BasicRowFrame: kwargs = {'size': (18, 1), 
'borderwidth': 3} row = [EButton('Review & Save Changes', key='save', **kwargs), EButton('Cancel', key='cancel', **kwargs)] return BasicRowFrame(row, side='t', anchor='c', visible=not self.disabled) # endregion # region Event Handling def enable(self): if not self.disabled: return super().enable() self.view_buttons_frame.hide() self.edit_buttons_frame.show() def disable(self): if self.disabled: return super().disable() self.edit_buttons_frame.hide() self.view_buttons_frame.show() def _update_numbered_type(self, var_name, unknown, action): # Registered as a change_cb for `type` and `number` num_ele: Input = self._tag_vals_and_eles['number'][1] value = '' try: value = num_ele.value.strip() num_val = int(value) except (TypeError, ValueError, AttributeError): num_ele.validated(not value) return else: num_ele.validated(True) type_val = DiscoEntryType(self._tag_vals_and_eles['type'][1].value) if type_val == DiscoEntryType.UNKNOWN: return num_type_ele: Input = self._tag_vals_and_eles['numbered_type'][1] num_type_ele.update(type_val.format(num_val)) def _replace_cover_image(self, event=None): if self.disabled: return if path := pick_file_popup(title='Pick new album cover'): cover_path_ele: Input = self._tag_vals_and_eles['cover_path'][1] cover_path_ele.update(path.as_posix()) image_ele: Image = self.cover_image_frame.rows[0].elements[0] image_ele.image = path # endregion class TrackInfoFrame(TagModMixin, InteractiveFrame): track_info: TrackInfo song_file: SongFile show_cover: Bool = False def __init__(self, track: TrackIdentifier, **kwargs): super().__init__(**kwargs) self.track_info = get_track_info(track) self.song_file = get_track_file(track) self._tag_vals_and_eles = {} @cached_property def path_str(self) -> str: return self.track_info.path.as_posix() @cached_property def file_name(self) -> str: return self.track_info.path.name def get_custom_layout(self) -> Layout: yield from self.build_meta_rows() yield from self.build_info_rows() def build_meta_rows(self) -> Iterator[list[Element]]: yield [Text('File:', size=(6, 1)), IText(self.file_name, size=(50, 1))] sf = self.song_file yield [ Text('Length:', size=(6, 1)), IText(sf.length_str, size=(10, 1)), Text('Type:'), IText(sf.tag_version, size=(20, 1)), ] def build_info_rows(self, keys: Collection[str] = None) -> Iterator[list[Element]]: fields = ['artist', 'title', 'name', 'genre', 'disk', 'num', 'rating'] if keys: fields = [f for f in fields if f not in keys] track_info, disabled = self.track_info, self.disabled for key in fields: if key == 'genre': value = track_info.genre_set.difference(track_info.album.genre_set) val_ele = _genre_list_box(value, track_info, disabled) elif key == 'rating': if (value := track_info[key]) is None: value = 0 val_ele = Rating(value, show_value=True, pad=(0, 0), disabled=disabled) else: value = _normalize_input_value(track_info[key]) val_ele = Input(value, size=(50, 1), disabled=disabled) self._tag_vals_and_eles[key] = (value, val_ele) yield [label_ele(key, size=(6, 1)), val_ele] def _genre_list_box(genres: Collection[str], info: TrackInfo | AlbumInfo, disabled: bool, **kwargs) -> EditableListBox: kwargs.setdefault('add_title', 'Add genre') kwargs.setdefault('add_prompt', f'Enter a new genre value to add to {info.title!r}') kwargs.setdefault('list_width', 40) return EditableListBox(sorted(genres), disabled=disabled, val_type=set, **kwargs) def _normalize_input_value(value) -> str: if value is None: value = '' elif not isinstance(value, str): value = str(value) return value def label_ele(text: str, size: XY = (15, 
1), **kwargs) -> Text: return Text(text.replace('_', ' ').title(), size=size, **kwargs) class TagFrame(InteractiveFrame): def enable(self): if not self.disabled: return for row in self.rows: for ele in row.elements: try: if ele.key == 'mp4': # Read-only continue except AttributeError: pass try: ele.enable() # noqa except AttributeError: pass self.disabled = False
normal
{ "blob_id": "384588e1a767081191228db2afa4a489f967a220", "index": 3952, "step-1": "\"\"\"\nAlbumInfo-related frames for the Album view.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nfrom typing import TYPE_CHECKING, Iterator, Collection, Any\n\nfrom ds_tools.caching.decorators import cached_property\nfrom tk_gui.elements import Element, HorizontalSeparator, Multiline, Text, Input, Image, Spacer\nfrom tk_gui.elements.buttons import Button, EventButton as EButton\nfrom tk_gui.elements.choices import ListBox, CheckBox, Combo\nfrom tk_gui.elements.frame import InteractiveFrame, Frame, BasicRowFrame\nfrom tk_gui.elements.menu import Menu, MenuItem\nfrom tk_gui.elements.rating import Rating\nfrom tk_gui.popups import pick_file_popup\n\nfrom music.common.disco_entry import DiscoEntryType\nfrom music.files import SongFile\nfrom music.manager.update import TrackInfo, AlbumInfo\nfrom ..utils import AlbumIdentifier, TrackIdentifier, get_album_info, get_album_dir, get_track_info, get_track_file\nfrom .helpers import IText\nfrom .images import AlbumCoverImageBuilder\nfrom .list_box import EditableListBox\n\nif TYPE_CHECKING:\n from tk_gui.typing import Layout, Bool, XY\n\n__all__ = ['AlbumInfoFrame', 'TrackInfoFrame']\nlog = logging.getLogger(__name__)\n\nValueEle = Text | Multiline | Rating | ListBox | Combo | EditableListBox | Input\nLRG_FONT = ('Helvetica', 20)\n\n\nclass TagModMixin:\n _tag_vals_and_eles: dict[str, tuple[Any, ValueEle]]\n\n def _iter_changes(self) -> Iterator[tuple[str, ValueEle, Any, Any]]:\n for key, (original_val, val_ele) in self._tag_vals_and_eles.items():\n if (value := val_ele.value) != original_val:\n yield key, val_ele, original_val, value\n\n def reset_tag_values(self):\n for key, val_ele, original_val, value in self._iter_changes():\n match val_ele:\n case ListBox() | EditableListBox():\n val_ele.update(choices=original_val, replace=True, select=True)\n case _: # Input() | Text() | CheckBox() | Combo() | Rating()\n val_ele.update(original_val)\n\n def get_modified(self) -> dict[str, tuple[Any, Any]]:\n return {key: (original_val, value) for key, val_ele, original_val, value in self._iter_changes()}\n\n\nclass AlbumInfoFrame(TagModMixin, InteractiveFrame):\n album_info: AlbumInfo\n\n def __init__(self, album: AlbumIdentifier, cover_size: XY = (250, 250), **kwargs):\n super().__init__(**kwargs)\n self.album_info = get_album_info(album)\n self.album_dir = get_album_dir(album)\n self.cover_size = cover_size\n self._tag_vals_and_eles = {}\n\n # region Layout Generation\n\n def get_custom_layout(self) -> Layout:\n yield from self.build_meta_rows()\n yield [self.cover_image_frame, TagFrame([*self.build_tag_rows()], disabled=self.disabled)]\n yield [HorizontalSeparator()]\n yield from self.build_buttons()\n\n def build_meta_rows(self):\n data = {'bitrate_str': set(), 'sample_rate_str': set(), 'bits_per_sample': set()}\n for track in self.album_dir:\n info = track.info\n for key, values in data.items():\n if value := info[key]:\n values.add(str(value))\n\n data = {key: ' / '.join(sorted(values)) for key, values in data.items()}\n yield [\n Text('Bitrate:'), IText(data['bitrate_str'], size=(18, 1)),\n Text('Sample Rate:'), IText(data['sample_rate_str'], size=(18, 1)),\n Text('Bit Depth:'), IText(data['bits_per_sample'], size=(18, 1)),\n ]\n yield [HorizontalSeparator()]\n\n def build_tag_rows(self):\n tooltips = {\n 'name': 'The name that was / should be used for the album directory',\n 'parent': 'The name that was / should be used for the artist directory',\n 
'singer': 'Solo singer of a group, when the album should be sorted under their group',\n 'solo_of_group': 'Whether the singer is a soloist',\n }\n disabled = self.disabled\n for key, value in self.album_info.to_dict(skip={'tracks'}, genres_as_set=True).items():\n if tooltip := tooltips.get(key):\n kwargs = {'tooltip': tooltip}\n else:\n kwargs = {}\n\n key_ele = label_ele(key, **kwargs)\n if key == 'type':\n types = [de.real_name for de in DiscoEntryType]\n if value:\n if isinstance(value, DiscoEntryType):\n value = value.real_name\n elif value not in types:\n types.append(value)\n val_ele = Combo(\n types, value, size=(48, None), disabled=disabled, key=key, change_cb=self._update_numbered_type\n )\n elif key == 'genre':\n val_ele = _genre_list_box(value, self.album_info, disabled, key=key)\n elif key in {'mp4', 'solo_of_group'}:\n kwargs['disabled'] = True if key == 'mp4' else disabled\n val_ele = CheckBox('', default=value, pad=(0, 0), key=key, **kwargs)\n else:\n if key.startswith('wiki_'):\n kwargs['link'] = True\n elif key == 'number':\n kwargs['change_cb'] = self._update_numbered_type\n value = _normalize_input_value(value)\n val_ele = Input(value, size=(50, 1), disabled=disabled, key=key, **kwargs)\n\n self._tag_vals_and_eles[key] = (value, val_ele)\n yield [key_ele, val_ele]\n\n @cached_property\n def cover_image_frame(self) -> Frame:\n class ImageMenu(Menu):\n MenuItem('Replace', callback=self._replace_cover_image, enabled=lambda me: not self.disabled)\n # TODO: Include get_wiki_cover_choice?\n\n cover_builder = AlbumCoverImageBuilder(self.album_info, self.cover_size)\n return cover_builder.make_thumbnail_frame(right_click_menu=ImageMenu())\n\n # endregion\n\n # region Layout Generation - Buttons\n\n def build_buttons(self) -> Layout:\n # These frames need to be in the same row for them to occupy the same space when visible\n yield [self.view_buttons_frame, self.edit_buttons_frame]\n\n @cached_property\n def view_buttons_frame(self) -> Frame:\n rows = [[BasicRowFrame(row, side='t')] for row in self._build_view_buttons()]\n return Frame(rows, visible=self.disabled, side='t')\n\n def _build_view_buttons(self) -> Iterator[list[Button]]: # noqa\n kwargs = {'size': (18, 1), 'borderwidth': 3}\n yield [\n EButton('Clean & Add BPM', key='clean_and_add_bpm', **kwargs),\n EButton('View All Tags', key='view_all_tags', **kwargs),\n EButton('Edit', key='edit_album', **kwargs),\n EButton('Wiki Update', key='wiki_update', **kwargs),\n ]\n kwargs['size'] = (25, 1)\n # TODO: Handle replacing inferior versions in real destination directory\n yield [\n # EButton('Sync Ratings Between Albums', key='sync_album_ratings', disabled=True, **kwargs),\n EButton('Sort Into Library', key='sort_into_library', **kwargs),\n # EButton('Copy Tags Between Albums', key='copy_album_tags', disabled=True, **kwargs),\n ]\n yield [\n EButton('Copy Tags To Album...', key='copy_src_album_tags', **kwargs),\n EButton('Copy Tags From Album...', key='copy_dst_album_tags', **kwargs),\n ]\n # TODO: Unify the above/below rows / shorten text / merge functionality with the sort view\n yield [\n EButton('Copy Tags To Lib Album...', key='copy_src_lib_album_tags', **kwargs),\n EButton('Copy Tags From Lib Album...', key='copy_dst_lib_album_tags', **kwargs),\n ]\n\n open_btn = EButton('\\U0001f5c1', key='open', font=LRG_FONT, size=(10, 1), tooltip='Open Album', borderwidth=3)\n album_dir = self.album_dir\n # TODO: handle: music.files.exceptions.InvalidAlbumDir: Invalid album dir - contains directories\n if len(album_dir.parent) > 1:\n 
kwargs = dict(font=LRG_FONT, size=(5, 1), borderwidth=3)\n yield [\n EButton('\\u2190', key='prev_dir', **kwargs) if album_dir.has_prev_sibling else Spacer(size=(90, 56)),\n open_btn,\n EButton('\\u2192', key='next_dir', **kwargs) if album_dir.has_next_sibling else Spacer(size=(90, 56)),\n ]\n else:\n yield [open_btn]\n\n @cached_property\n def edit_buttons_frame(self) -> BasicRowFrame:\n kwargs = {'size': (18, 1), 'borderwidth': 3}\n row = [EButton('Review & Save Changes', key='save', **kwargs), EButton('Cancel', key='cancel', **kwargs)]\n return BasicRowFrame(row, side='t', anchor='c', visible=not self.disabled)\n\n # endregion\n\n # region Event Handling\n\n def enable(self):\n if not self.disabled:\n return\n super().enable()\n self.view_buttons_frame.hide()\n self.edit_buttons_frame.show()\n\n def disable(self):\n if self.disabled:\n return\n super().disable()\n self.edit_buttons_frame.hide()\n self.view_buttons_frame.show()\n\n def _update_numbered_type(self, var_name, unknown, action):\n # Registered as a change_cb for `type` and `number`\n num_ele: Input = self._tag_vals_and_eles['number'][1]\n value = ''\n try:\n value = num_ele.value.strip()\n num_val = int(value)\n except (TypeError, ValueError, AttributeError):\n num_ele.validated(not value)\n return\n else:\n num_ele.validated(True)\n\n type_val = DiscoEntryType(self._tag_vals_and_eles['type'][1].value)\n if type_val == DiscoEntryType.UNKNOWN:\n return\n\n num_type_ele: Input = self._tag_vals_and_eles['numbered_type'][1]\n num_type_ele.update(type_val.format(num_val))\n\n def _replace_cover_image(self, event=None):\n if self.disabled:\n return\n if path := pick_file_popup(title='Pick new album cover'):\n cover_path_ele: Input = self._tag_vals_and_eles['cover_path'][1]\n cover_path_ele.update(path.as_posix())\n image_ele: Image = self.cover_image_frame.rows[0].elements[0]\n image_ele.image = path\n\n # endregion\n\n\nclass TrackInfoFrame(TagModMixin, InteractiveFrame):\n track_info: TrackInfo\n song_file: SongFile\n show_cover: Bool = False\n\n def __init__(self, track: TrackIdentifier, **kwargs):\n super().__init__(**kwargs)\n self.track_info = get_track_info(track)\n self.song_file = get_track_file(track)\n self._tag_vals_and_eles = {}\n\n @cached_property\n def path_str(self) -> str:\n return self.track_info.path.as_posix()\n\n @cached_property\n def file_name(self) -> str:\n return self.track_info.path.name\n\n def get_custom_layout(self) -> Layout:\n yield from self.build_meta_rows()\n yield from self.build_info_rows()\n\n def build_meta_rows(self) -> Iterator[list[Element]]:\n yield [Text('File:', size=(6, 1)), IText(self.file_name, size=(50, 1))]\n sf = self.song_file\n yield [\n Text('Length:', size=(6, 1)), IText(sf.length_str, size=(10, 1)),\n Text('Type:'), IText(sf.tag_version, size=(20, 1)),\n ]\n\n def build_info_rows(self, keys: Collection[str] = None) -> Iterator[list[Element]]:\n fields = ['artist', 'title', 'name', 'genre', 'disk', 'num', 'rating']\n if keys:\n fields = [f for f in fields if f not in keys]\n\n track_info, disabled = self.track_info, self.disabled\n for key in fields:\n if key == 'genre':\n value = track_info.genre_set.difference(track_info.album.genre_set)\n val_ele = _genre_list_box(value, track_info, disabled)\n elif key == 'rating':\n if (value := track_info[key]) is None:\n value = 0\n val_ele = Rating(value, show_value=True, pad=(0, 0), disabled=disabled)\n else:\n value = _normalize_input_value(track_info[key])\n val_ele = Input(value, size=(50, 1), disabled=disabled)\n\n 
self._tag_vals_and_eles[key] = (value, val_ele)\n yield [label_ele(key, size=(6, 1)), val_ele]\n\n\ndef _genre_list_box(genres: Collection[str], info: TrackInfo | AlbumInfo, disabled: bool, **kwargs) -> EditableListBox:\n kwargs.setdefault('add_title', 'Add genre')\n kwargs.setdefault('add_prompt', f'Enter a new genre value to add to {info.title!r}')\n kwargs.setdefault('list_width', 40)\n return EditableListBox(sorted(genres), disabled=disabled, val_type=set, **kwargs)\n\n\ndef _normalize_input_value(value) -> str:\n if value is None:\n value = ''\n elif not isinstance(value, str):\n value = str(value)\n return value\n\n\ndef label_ele(text: str, size: XY = (15, 1), **kwargs) -> Text:\n return Text(text.replace('_', ' ').title(), size=size, **kwargs)\n\n\nclass TagFrame(InteractiveFrame):\n def enable(self):\n if not self.disabled:\n return\n\n for row in self.rows:\n for ele in row.elements:\n try:\n if ele.key == 'mp4': # Read-only\n continue\n except AttributeError:\n pass\n try:\n ele.enable() # noqa\n except AttributeError:\n pass\n\n self.disabled = False\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import urllib.request

username = ''
link = r'https://www.instagram.com/' + username
html = urllib.request.urlopen(link)
print(html.read())
normal
{ "blob_id": "db93de33f537eeaf64ca8e2b2b79aba1f592305b", "index": 5434, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(html.read())\n", "step-3": "<mask token>\nusername = ''\nlink = 'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())\n", "step-4": "import urllib.request\nusername = ''\nlink = 'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())\n", "step-5": "import urllib.request\n\nusername = ''\nlink = r'https://www.instagram.com/' + username\nhtml = urllib.request.urlopen(link)\nprint(html.read())", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
print('Welcome aboard, Oleksij!')
normal
{ "blob_id": "2b1ec29d665aa93cd53644b62efcd1305b34e13e", "index": 2636, "step-1": "<mask token>\n", "step-2": "print('Welcome aboard, Oleksij!')\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
from app import app
from flask import request


@app.route('/')
@app.route('/index')
def index():
    return 'Hello world'


@app.route('/api_post', methods=['POST'])
def postJsonHandler():
    print(request.is_json)
    content = request.get_json()
    print(content)
    return 'JSON posted'
normal
{ "blob_id": "9d8c4bf9f9279d5e30d0e9742cdd31713e5f4b9e", "index": 2104, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n return 'Hello world'\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n return 'Hello world'\n\n\[email protected]('/api_post', methods=['POST'])\ndef postJsonHandler():\n print(request.is_json)\n content = request.get_json()\n print(content)\n return 'JSON posted'\n", "step-4": "from app import app\nfrom flask import request\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n return 'Hello world'\n\n\[email protected]('/api_post', methods=['POST'])\ndef postJsonHandler():\n print(request.is_json)\n content = request.get_json()\n print(content)\n return 'JSON posted'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# this script compares different base2number methods

from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
#from matplotlib import pyplot as plt
#from matplotlib import cm
import matplotlib.pyplot as plt
from matplotlib import pyplot
import math
import os
import sys
import cv2
import numpy as np
import math
from scipy.stats import kurtosis, skew
from Bio import SeqIO
import pandas as pd
import seaborn as sns

from descriptor import get_features
from descriptor import get_features_glcm
from descriptor import get_features_lbp
from descriptor import get_features_mlbp

from ete3 import PhyloTree, TreeStyle
from ete3 import Tree
from skbio import DistanceMatrix
from skbio.tree import nj

current_dir = os.path.dirname(os.path.abspath(__file__))

###############################################################################################################
###############################################################################################################
sequences = [
    'J01859.fna', 'NR_037066.fna', 'NR_040849.fna', 'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna',
    'NR_134818.fna', 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna', 'KP317497.fna',
    'NR_156072.fna']
names = [
    'Escherichia coli', 'T.Thermophilus', 'B.Wakoensis', 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli',
    'S.Tangierensis', 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris', 'S.Halotolerans',
    'B.Maritimus', 'S.Himalayensis']

csv_mega = current_dir + "/sample_genomes/seqs_db1_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db1.fasta"
results_file = current_dir + "/results/compare_features/db1"

###############################################################################################################
###############################################################################################################
sequences = [
    'L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna', 'M22654.fna', 'M22655.fna', 'M22656.fna',
    'M22657.fna', 'V00658.fna', 'V00659.fna', 'V00672.fna', 'V00675.fna']
names = [
    'Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis', 'Macaca sylvanus', 'Saimiri sciureus',
    'Tarsius syrichta', 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']

csv_mega = current_dir + "/sample_genomes/seqs_db2_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db2.fasta"
results_file = current_dir + "/results/compare_features/db2"

###############################################################################################################
###############################################################################################################
sequences = [
    'V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna', 'D38115.fna', 'X99256.fna', 'Y18001.fna',
    'X79547.fna', 'Y07726.fna', 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',
    'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']
names = [
    'Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla', 'Orangutan', 'Gibbon', 'Baboon', 'Horse',
    'White rhinoceros', 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow', 'Rat', 'Mouse',
    'Platypus']

csv_mega = current_dir + "/sample_genomes/seqs_db3_distances.csv"
seq_file_full = current_dir + "/sample_genomes/seqs_db3.fasta"
results_file = current_dir + "/results/compare_features/db3"
# NOTE: 'sequences', 'names', csv_mega, seq_file_full and results_file are reassigned above,
# so only the last (db3) configuration is actually used by the code below.

###############################################################################################################
###############################################################################################################
data_features_fos = []
data_features_glcm = []
data_features_lbp = []
data_features_mlbp = []

mapping_function_size = 6  # there are 6 types of mapping functions

f_out = open(seq_file_full, "w")
for sequence_file in sequences:
    f_in = open(current_dir + "/sample_genomes/" + sequence_file, "r")
    f_out.write(f_in.read())
    f_in.close()

    data = []
    fa_file = current_dir + "/sample_genomes/" + sequence_file
    seqs = SeqIO.parse(fa_file, "fasta")
    for record in seqs:
        data.append(record.seq.upper())
    seq = data[0]

    temp_fos = []
    temp_glcm = []
    temp_lbp = []
    temp_mlbp = []

    # here we evaluate each mapping function
    for mapping_type in range(mapping_function_size):
        skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)
        temp_fos.append([skewness, my_kurtosis, energy, entropy])
        #print("fos mapping=", mapping_type, [skewness, my_kurtosis, energy, entropy])

        entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)
        temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])
        #print("glcm mapping=", mapping_type, [entropy, contrast, energy, correlation, homogeneity])

        hist_lbp = get_features_lbp(seq, mapping_type)
        temp_lbp.append(hist_lbp)
        #print("lbp mapping=", mapping_type, hist_lbp)

        hist_mlbp = get_features_mlbp(seq, mapping_type)
        temp_mlbp.append(hist_mlbp)
        #print("mlbp mapping=", mapping_type, hist_mlbp)

    data_features_fos.append(temp_fos)
    data_features_glcm.append(temp_glcm)
    data_features_lbp.append(temp_lbp)
    data_features_mlbp.append(temp_mlbp)

f_out.close()

data_features_fos = np.array(data_features_fos)
data_features_glcm = np.array(data_features_glcm)
data_features_lbp = np.array(data_features_lbp)
data_features_mlbp = np.array(data_features_mlbp)

###############################################################################################################
# compute the distances with FOS
###############################################################################################################
full_distances_fos = []
for mapping_type in range(mapping_function_size):
    DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))
    for i in range(data_features_fos.shape[0]):
        row = np.zeros(data_features_fos.shape[0])
        for j in range(i, data_features_fos.shape[0]):
            dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type])**2))
            row[j] = dist
        DIST_fos[i] = row

    DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))
    DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))
    full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])

full_distances_fos = np.array(full_distances_fos)
print("full_distances_fos", full_distances_fos.shape)

###############################################################################################################
# compute the distances with GLCM
###############################################################################################################
full_distances_glcm = []
for mapping_type in range(mapping_function_size):
    DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))
    for i in range(data_features_glcm.shape[0]):
        row = np.zeros(data_features_glcm.shape[0])
        for j in range(i, data_features_glcm.shape[0]):
            dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type])**2))
            row[j] = dist
        DIST_glcm[i] = row

    DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))
    DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))
    full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])

full_distances_glcm = np.array(full_distances_glcm)
print("full_distances_glcm", full_distances_glcm.shape)

###############################################################################################################
# compute the distances with LBP
###############################################################################################################
full_distances_lbp = []
for mapping_type in range(mapping_function_size):
    DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))
    for i in range(data_features_lbp.shape[0]):
        row = np.zeros(data_features_lbp.shape[0])
        for j in range(i, data_features_lbp.shape[0]):
            dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type])**2))
            row[j] = dist
        DIST_lbp[i] = row

    DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))
    DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp))
    full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])

full_distances_lbp = np.array(full_distances_lbp)
print("full_distances_lbp", full_distances_lbp.shape)

###############################################################################################################
# compute the distances with MLBP
###############################################################################################################
full_distances_mlbp = []
for mapping_type in range(mapping_function_size):
    DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0]))
    for i in range(data_features_mlbp.shape[0]):
        row = np.zeros(data_features_mlbp.shape[0])
        for j in range(i, data_features_mlbp.shape[0]):
            dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - data_features_mlbp[j][mapping_type])**2))
            row[j] = dist
        DIST_mlbp[i] = row

    DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))
    DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp))
    full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])

full_distances_mlbp = np.array(full_distances_mlbp)
print("full_distances_mlbp", full_distances_mlbp.shape)

###############################################################################################################
### distances from mega ###
###############################################################################################################
mega_dist_csv = pd.read_csv(csv_mega)
mega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])
DIST_mega = mega_dist_csv.values
DIST_mega[np.isnan(DIST_mega)] = 0  # fill the NaN values with zeros
DIST_mega = DIST_mega + DIST_mega.T  # copy the lower triangle to the upper triangle of the matrix

distances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]
distances_mega = (distances_mega - np.min(distances_mega)) / (np.max(distances_mega) - np.min(distances_mega))

###############################################################################################################
###############################################################################################################
names_temp = np.array(sequences)
names_temp = names_temp[1:names_temp.shape[0]]  # remove the first element

###############################################################################################################
# process the distances with FOS
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_fos.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# process the distances with GLCM
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_glcm.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# process the distances with LBP
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_lbp.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# process the distances with MLBP
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(3, 2)
axs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)
axs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 0].legend(loc='upper right', fontsize=6)
axs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[2, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_mlbp.png", dpi=200, bbox_inches='tight')

data_csv = []
error_fos = []   # save the error for each mapping function with FOS
error_glcm = []  # save the error for each mapping function with GLCM
error_lbp = []   # save the error for each mapping function with LBP
error_mlbp = []  # save the error for each mapping function with MLBP

for mapping_type in range(mapping_function_size):
    error_fos.append((np.sum((full_distances_fos[mapping_type] - distances_mega)**2))/distances_mega.shape[0])
    error_glcm.append((np.sum((full_distances_glcm[mapping_type] - distances_mega)**2))/distances_mega.shape[0])
    error_lbp.append((np.sum((full_distances_lbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])
    error_mlbp.append((np.sum((full_distances_mlbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])

data_csv.append(error_fos)
data_csv.append(error_glcm)
data_csv.append(error_lbp)
data_csv.append(error_mlbp)

data_csv = np.array(data_csv)
df = pd.DataFrame(data=data_csv.T, index=["map0", "map1", "map2", "map3", "map4", "map5"], columns=["FOS", "GLCM", "LBP", "MLBP"])
print(df)
df.to_csv(results_file + ".csv", index=True)

#print(error_fos)
#print(error_glcm)
#print(error_lbp)
#print(error_mlbp)

###############################################################################################################
# processing MAPPING 0 function with all the algorithms
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_0map.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# processing MAPPING 1 function with all the algorithms
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_1map.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# processing MAPPING 2 function with all the algorithms
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_2map.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# processing MAPPING 3 function with all the algorithms
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_3map.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# processing MAPPING 4 function with all the algorithms
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_4map.png", dpi=200, bbox_inches='tight')

###############################################################################################################
# processing MAPPING 5 function with all the algorithms
###############################################################################################################
plt.clf()
fig, axs = plt.subplots(2, 2)
axs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')
axs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 0].legend(loc='upper right', fontsize=6)
axs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')
axs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[0, 1].legend(loc='upper right', fontsize=6)
axs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')
axs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 0].legend(loc='upper right', fontsize=6)
axs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')
axs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')
axs[1, 1].legend(loc='upper right', fontsize=6)

for ax in axs.flat:
    ax.label_outer()
    ax.yaxis.set_tick_params(labelsize=6)
    plt.sca(ax)
    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6)
    plt.xlabel('Sequences', fontsize=6)

fig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)
plt.savefig(results_file + "_5map.png", dpi=200, bbox_inches='tight')
normal
{ "blob_id": "9696e5799d46adb5b92c0900e2064b927addfd93", "index": 2506, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\n<mask token>\nprint('full_distances_fos', full_distances_fos.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\n<mask token>\nprint('full_distances_glcm', full_distances_glcm.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\n<mask token>\nprint('full_distances_lbp', full_distances_lbp.shape)\n<mask token>\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in 
range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\n<mask token>\nprint('full_distances_mlbp', full_distances_mlbp.shape)\n<mask token>\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n 
plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\n<mask token>\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n 
error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\n<mask token>\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', 
label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\n<mask token>\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', 
label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n", "step-3": "<mask token>\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',\n 'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',\n 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',\n 'KP317497.fna', 'NR_156072.fna']\nnames = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',\n 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',\n 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',\n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\ncsv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'\nresults_file = current_dir + '/results/compare_features/db1'\nsequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',\n 'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',\n 'V00659.fna', 'V00672.fna', 'V00675.fna']\nnames = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',\n 'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',\n 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']\ncsv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'\nresults_file = current_dir + '/results/compare_features/db2'\nsequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',\n 'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna',\n 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',\n 'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']\nnames = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',\n 'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros',\n 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',\n 'Rat', 'Mouse', 'Platypus']\ncsv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'\nresults_file = current_dir + '/results/compare_features/db3'\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\nmapping_function_size = 6\nf_out = open(seq_file_full, 'w')\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n 
temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\nfull_distances_fos = np.array(full_distances_fos)\nprint('full_distances_fos', full_distances_fos.shape)\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\nfull_distances_glcm = np.array(full_distances_glcm)\nprint('full_distances_glcm', full_distances_glcm.shape)\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\nfull_distances_lbp = np.array(full_distances_lbp)\nprint('full_distances_lbp', full_distances_lbp.shape)\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint('full_distances_mlbp', 
full_distances_mlbp.shape)\nmega_dist_csv = pd.read_csv(csv_mega)\nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0\nDIST_mega = DIST_mega + DIST_mega.T\ndistances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(\n distances_mega) - np.min(distances_mega))\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]]\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', 
rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\ndata_csv = []\nerror_fos = []\nerror_glcm = []\nerror_lbp = []\nerror_mlbp = []\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n 
error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\ndata_csv = np.array(data_csv)\ndf = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 'map3',\n 'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', 
label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, 
full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n", "step-4": "from sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nfrom matplotlib import pyplot\nimport math\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport math\nfrom scipy.stats import kurtosis, skew\nfrom Bio import SeqIO\nimport pandas as pd\nimport seaborn as sns\nfrom descriptor import get_features\nfrom descriptor import get_features_glcm\nfrom descriptor import get_features_lbp\nfrom descriptor import get_features_mlbp\nfrom ete3 import PhyloTree, TreeStyle\nfrom ete3 import Tree\nfrom skbio import DistanceMatrix\nfrom skbio.tree import nj\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\nsequences = ['J01859.fna', 'NR_037066.fna', 'NR_040849.fna',\n 'NR_117152.fna', 'NR_132306.fna', 'NR_134817.fna', 'NR_134818.fna',\n 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', 'NR_152063.fna',\n 'KP317497.fna', 'NR_156072.fna']\nnames = ['Escherichia coli', 'T.Thermophilus', 'B.Wakoensis',\n 'T.Filiformis', 'T.Tengchongensis', 'S.Cameli', 'S.Tangierensis',\n 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris',\n 'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\ncsv_mega = current_dir + '/sample_genomes/seqs_db1_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db1.fasta'\nresults_file = current_dir + '/results/compare_features/db1'\nsequences = ['L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna',\n 'M22654.fna', 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna',\n 'V00659.fna', 'V00672.fna', 'V00675.fna']\nnames = ['Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis',\n 'Macaca sylvanus', 'Saimiri sciureus', 'Tarsius syrichta',\n 'Lemur catta', 'Gorilla', 'Hylobates', 'Chimpanzee', 'Sumatran Orangutan']\ncsv_mega = current_dir + '/sample_genomes/seqs_db2_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db2.fasta'\nresults_file = current_dir + '/results/compare_features/db2'\nsequences = ['V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna',\n 'D38115.fna', 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna',\n 'X63726.fna', 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna',\n 'V00654.fna', 'X14848.fna', 'V00711.fna', 'X83427.fna']\nnames = ['Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla',\n 'Orangutan', 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros',\n 'Harbor seal', 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow',\n 'Rat', 'Mouse', 'Platypus']\ncsv_mega = current_dir + '/sample_genomes/seqs_db3_distances.csv'\nseq_file_full = current_dir + '/sample_genomes/seqs_db3.fasta'\nresults_file = current_dir + '/results/compare_features/db3'\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\nmapping_function_size = 6\nf_out = 
open(seq_file_full, 'w')\nfor sequence_file in sequences:\n f_in = open(current_dir + '/sample_genomes/' + sequence_file, 'r')\n f_out.write(f_in.read())\n f_in.close()\n data = []\n fa_file = current_dir + '/sample_genomes/' + sequence_file\n seqs = SeqIO.parse(fa_file, 'fasta')\n for record in seqs:\n data.append(record.seq.upper())\n seq = data[0]\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type\n )\n temp_fos.append([skewness, my_kurtosis, energy, entropy])\n entropy, contrast, energy, correlation, homogeneity = (\n get_features_glcm(seq, mapping_type))\n temp_glcm.append([entropy, contrast, energy, correlation, homogeneity])\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append(hist_lbp)\n hist_mlbp = get_features_mlbp(seq, mapping_type)\n temp_mlbp.append(hist_mlbp)\n data_features_fos.append(temp_fos)\n data_features_glcm.append(temp_glcm)\n data_features_lbp.append(temp_lbp)\n data_features_mlbp.append(temp_mlbp)\nf_out.close()\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.\n shape[0]))\n for i in range(data_features_fos.shape[0]):\n row = np.zeros(data_features_fos.shape[0])\n for j in range(i, data_features_fos.shape[0]):\n dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] -\n data_features_fos[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_fos[i] = row\n DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(\n DIST_fos))\n full_distances_fos.append(DIST_fos[0, 1:DIST_fos.shape[0]])\nfull_distances_fos = np.array(full_distances_fos)\nprint('full_distances_fos', full_distances_fos.shape)\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.\n shape[0]))\n for i in range(data_features_glcm.shape[0]):\n row = np.zeros(data_features_glcm.shape[0])\n for j in range(i, data_features_glcm.shape[0]):\n dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] -\n data_features_glcm[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_glcm[i] = row\n DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.\n min(DIST_glcm))\n full_distances_glcm.append(DIST_glcm[0, 1:DIST_glcm.shape[0]])\nfull_distances_glcm = np.array(full_distances_glcm)\nprint('full_distances_glcm', full_distances_glcm.shape)\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.\n shape[0]))\n for i in range(data_features_lbp.shape[0]):\n row = np.zeros(data_features_lbp.shape[0])\n for j in range(i, data_features_lbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] -\n data_features_lbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_lbp[i] = row\n DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(\n DIST_lbp))\n full_distances_lbp.append(DIST_lbp[0, 1:DIST_lbp.shape[0]])\nfull_distances_lbp = 
np.array(full_distances_lbp)\nprint('full_distances_lbp', full_distances_lbp.shape)\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.\n shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] -\n data_features_mlbp[j][mapping_type]) ** 2))\n row[j] = dist\n DIST_mlbp[i] = row\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.\n min(DIST_mlbp))\n full_distances_mlbp.append(DIST_mlbp[0, 1:DIST_mlbp.shape[0]])\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint('full_distances_mlbp', full_distances_mlbp.shape)\nmega_dist_csv = pd.read_csv(csv_mega)\nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0\nDIST_mega = DIST_mega + DIST_mega.T\ndistances_mega = DIST_mega[0, 1:DIST_mega.shape[0]]\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(\n distances_mega) - np.min(distances_mega))\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]]\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_fos.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 
1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_glcm.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_lbp.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\naxs[2, 0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 0].legend(loc='upper right', 
fontsize=6)\naxs[2, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_mlbp.png', dpi=200, bbox_inches='tight')\ndata_csv = []\nerror_fos = []\nerror_glcm = []\nerror_lbp = []\nerror_mlbp = []\nfor mapping_type in range(mapping_function_size):\n error_fos.append(np.sum((full_distances_fos[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_glcm.append(np.sum((full_distances_glcm[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_lbp.append(np.sum((full_distances_lbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\n error_mlbp.append(np.sum((full_distances_mlbp[mapping_type] -\n distances_mega) ** 2) / distances_mega.shape[0])\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\ndata_csv = np.array(data_csv)\ndf = pd.DataFrame(data=data_csv.T, index=['map0', 'map1', 'map2', 'map3',\n 'map4', 'map5'], columns=['FOS', 'GLCM', 'LBP', 'MLBP'])\nprint(df)\ndf.to_csv(results_file + '.csv', index=True)\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_0map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n 
ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_1map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_2map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n fontsize=6)\n plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_3map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', 
fontsize=6)\nfor ax in axs.flat:\n    ax.label_outer()\n    ax.yaxis.set_tick_params(labelsize=6)\n    plt.sca(ax)\n    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n        fontsize=6)\n    plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_4map.png', dpi=200, bbox_inches='tight')\nplt.clf()\nfig, axs = plt.subplots(2, 2)\naxs[0, 0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 0].legend(loc='upper right', fontsize=6)\naxs[0, 1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0, 1].legend(loc='upper right', fontsize=6)\naxs[1, 0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1, 0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 0].legend(loc='upper right', fontsize=6)\naxs[1, 1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1, 1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1, 1].legend(loc='upper right', fontsize=6)\nfor ax in axs.flat:\n    ax.label_outer()\n    ax.yaxis.set_tick_params(labelsize=6)\n    plt.sca(ax)\n    plt.xticks(rotation=45, horizontalalignment='right', fontweight='light',\n        fontsize=6)\n    plt.xlabel('Sequences', fontsize=6)\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6)\nplt.savefig(results_file + '_5map.png', dpi=200, bbox_inches='tight')\n", "step-5": "# this script compares different base2number mapping methods\n\nfrom sklearn.model_selection import KFold \nfrom sklearn.model_selection import train_test_split\n#from matplotlib import pyplot as plt\n#from matplotlib import cm\nimport matplotlib.pyplot as plt \nfrom matplotlib import pyplot\nimport math\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport math\nfrom scipy.stats import kurtosis, skew\nfrom Bio import SeqIO\nimport pandas as pd\nimport seaborn as sns\n\nfrom descriptor import get_features\nfrom descriptor import get_features_glcm\nfrom descriptor import get_features_lbp\nfrom descriptor import get_features_mlbp\n\nfrom ete3 import PhyloTree, TreeStyle\nfrom ete3 import Tree\n\nfrom skbio import DistanceMatrix\nfrom skbio.tree import nj\n\n\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n\n###################################################################################################################\n###################################################################################################################\n\nsequences = [ 'J01859.fna', 'NR_037066.fna', 'NR_040849.fna', 'NR_117152.fna', 'NR_132306.fna', \n              'NR_134817.fna', 'NR_134818.fna', 'NR_136784.fna', 'NR_148244.fna', 'NR_148787.fna', \n              'NR_152063.fna', 'KP317497.fna', 'NR_156072.fna' ]\n\nnames = [ 'Escherichia coli', 'T.Thermophilus', 'B.Wakoensis', 'T.Filiformis', 'T.Tengchongensis', \n          'S.Cameli', 'S.Tangierensis', 'T.amyloliquefaciens', 'B.Xiamenensis', 'B.Australimaris', \n          'S.Halotolerans', 'B.Maritimus', 'S.Himalayensis']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db1_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db1.fasta\"\nresults_file = current_dir + 
\"/results/compare_features/db1\"\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'L00016.fna', 'M22650.fna', 'M22651.fna', 'M22653.fna', 'M22654.fna', \n 'M22655.fna', 'M22656.fna', 'M22657.fna', 'V00658.fna', 'V00659.fna', \n 'V00672.fna', 'V00675.fna']\n\nnames = [ 'Human', 'Macaca mulatta', 'Macaca fuscata', 'Macaca fascicularis', 'Macaca sylvanus', \n 'Saimiri sciureus', 'Tarsius syrichta', 'Lemur catta', 'Gorilla', 'Hylobates', \n 'Chimpanzee', 'Sumatran Orangutan']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db2_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db2.fasta\"\nresults_file = current_dir + \"/results/compare_features/db2\"\n\n###################################################################################################################################\n###################################################################################################################################\n\nsequences = [ 'V00662.fna', 'D38116.fna', 'D38113.fna', 'D38114.fna', 'D38115.fna', \n 'X99256.fna', 'Y18001.fna', 'X79547.fna', 'Y07726.fna', 'X63726.fna', \n 'X72004.fna', 'U20753.fna', 'X61145.fna', 'X72204.fna', 'V00654.fna', \n 'X14848.fna', 'V00711.fna', 'X83427.fna']\n\nnames = [ 'Human', 'Pygmy chimpanzee', 'Common chimpanzee', 'Gorilla', 'Orangutan', \n 'Gibbon', 'Baboon', 'Horse', 'White rhinoceros', 'Harbor seal', \n 'Gray seal', 'Cat', 'Fin whale', 'Blue whale', 'Cow', \n 'Rat', 'Mouse', 'Platypus']\n\ncsv_mega = current_dir + \"/sample_genomes/seqs_db3_distances.csv\"\nseq_file_full = current_dir + \"/sample_genomes/seqs_db3.fasta\"\nresults_file = current_dir + \"/results/compare_features/db3\"\n\n###################################################################################################################################\n###################################################################################################################################\n\ndata_features_fos = []\ndata_features_glcm = []\ndata_features_lbp = []\ndata_features_mlbp = []\n\nmapping_function_size = 6 # trere is 6 types of mapping functions\n\nf_out = open(seq_file_full, \"w\")\n\nfor sequence_file in sequences:\n\n f_in = open(current_dir + \"/sample_genomes/\" + sequence_file, \"r\")\n f_out.write(f_in.read())\n f_in.close()\n\n data = [] \n fa_file = current_dir + \"/sample_genomes/\" + sequence_file\n seqs = SeqIO.parse(fa_file, \"fasta\")\n for record in seqs:\n data.append(record.seq.upper()) \n\n seq = data[0] \n\n temp_fos = []\n temp_glcm = []\n temp_lbp = []\n temp_mlbp = []\n # here we evaluate each mapping funciton\n for mapping_type in range(mapping_function_size):\n skewness, my_kurtosis, energy, entropy = get_features(seq, mapping_type)\n temp_fos.append( [skewness, my_kurtosis, energy, entropy] )\n #rint(\"fos mapping=\",mapping_type, [skewness, my_kurtosis, energy, entropy])\n\n entropy, contrast, energy, correlation, homogeneity = get_features_glcm(seq, mapping_type)\n temp_glcm.append( [entropy, contrast, energy, correlation, homogeneity] )\n #print(\"glcm mapping=\",mapping_type, [entropy, contrast, energy, correlation, homogeneity])\n\n hist_lbp = get_features_lbp(seq, mapping_type)\n temp_lbp.append( hist_lbp )\n #print(\"lbp mapping=\",mapping_type, hist_lbp)\n\n hist_mlbp = get_features_mlbp(seq, 
mapping_type)\n        temp_mlbp.append( hist_mlbp )\n        #print(\"mlbp mapping=\",mapping_type, hist_mlbp)\n\n    data_features_fos.append(temp_fos)\n    data_features_glcm.append(temp_glcm)\n    data_features_lbp.append(temp_lbp)\n    data_features_mlbp.append(temp_mlbp)\n\nf_out.close()\n\ndata_features_fos = np.array(data_features_fos)\ndata_features_glcm = np.array(data_features_glcm)\ndata_features_lbp = np.array(data_features_lbp)\ndata_features_mlbp = np.array(data_features_mlbp)\n\n###################################################################################################################\n# process the distances with FOS\n###################################################################################################################\nfull_distances_fos = []\nfor mapping_type in range(mapping_function_size):\n\n    DIST_fos = np.zeros((data_features_fos.shape[0], data_features_fos.shape[0]))\n    for i in range(data_features_fos.shape[0]):\n        row = np.zeros(data_features_fos.shape[0])\n        for j in range(i, data_features_fos.shape[0]):\n            dist = np.sqrt(np.sum((data_features_fos[i][mapping_type] - data_features_fos[j][mapping_type])**2))\n            row[j] = dist \n        DIST_fos[i] = row\n\n    DIST_fos = DIST_fos + DIST_fos.T - np.diag(np.diag(DIST_fos))\n    DIST_fos = (DIST_fos - np.min(DIST_fos)) / (np.max(DIST_fos) - np.min(DIST_fos))\n    full_distances_fos.append( DIST_fos[0,1:DIST_fos.shape[0]] )\n\nfull_distances_fos = np.array(full_distances_fos)\nprint(\"full_distances_fos\", full_distances_fos.shape)\n\n###################################################################################################################\n# process the distances with GLCM\n###################################################################################################################\nfull_distances_glcm = []\nfor mapping_type in range(mapping_function_size):\n\n    DIST_glcm = np.zeros((data_features_glcm.shape[0], data_features_glcm.shape[0]))\n    for i in range(data_features_glcm.shape[0]):\n        row = np.zeros(data_features_glcm.shape[0])\n        for j in range(i, data_features_glcm.shape[0]):\n            dist = np.sqrt(np.sum((data_features_glcm[i][mapping_type] - data_features_glcm[j][mapping_type])**2))\n            row[j] = dist \n        DIST_glcm[i] = row\n\n    DIST_glcm = DIST_glcm + DIST_glcm.T - np.diag(np.diag(DIST_glcm))\n    DIST_glcm = (DIST_glcm - np.min(DIST_glcm)) / (np.max(DIST_glcm) - np.min(DIST_glcm))\n    full_distances_glcm.append( DIST_glcm[0,1:DIST_glcm.shape[0]] )\n\nfull_distances_glcm = np.array(full_distances_glcm)\nprint(\"full_distances_glcm\", full_distances_glcm.shape)\n\n###################################################################################################################\n# process the distances with LBP\n###################################################################################################################\nfull_distances_lbp = []\nfor mapping_type in range(mapping_function_size):\n\n    DIST_lbp = np.zeros((data_features_lbp.shape[0], data_features_lbp.shape[0]))\n    for i in range(data_features_lbp.shape[0]):\n        row = np.zeros(data_features_lbp.shape[0])\n        for j in range(i, data_features_lbp.shape[0]):\n            dist = np.sqrt(np.sum((data_features_lbp[i][mapping_type] - data_features_lbp[j][mapping_type])**2))\n            row[j] = dist \n        DIST_lbp[i] = row\n\n    DIST_lbp = DIST_lbp + DIST_lbp.T - np.diag(np.diag(DIST_lbp))\n    DIST_lbp = (DIST_lbp - np.min(DIST_lbp)) / (np.max(DIST_lbp) - np.min(DIST_lbp))\n    full_distances_lbp.append( DIST_lbp[0,1:DIST_lbp.shape[0]] )\n\nfull_distances_lbp = 
np.array(full_distances_lbp)\nprint(\"full_distances_lbp\", full_distances_lbp.shape)\n\n###################################################################################################################3\n# procesamos las distancias con MLBP\n###################################################################################################################\nfull_distances_mlbp = []\nfor mapping_type in range(mapping_function_size):\n\n DIST_mlbp = np.zeros((data_features_mlbp.shape[0], data_features_mlbp.shape[0]))\n for i in range(data_features_mlbp.shape[0]):\n row = np.zeros(data_features_mlbp.shape[0])\n for j in range(i, data_features_mlbp.shape[0]):\n dist = np.sqrt(np.sum((data_features_mlbp[i][mapping_type] - data_features_mlbp[j][mapping_type])**2))\n row[j] = dist \n DIST_mlbp[i] = row\n\n DIST_mlbp = DIST_mlbp + DIST_mlbp.T - np.diag(np.diag(DIST_mlbp))\n DIST_mlbp = (DIST_mlbp - np.min(DIST_mlbp)) / (np.max(DIST_mlbp) - np.min(DIST_mlbp))\n full_distances_mlbp.append( DIST_mlbp[0,1:DIST_mlbp.shape[0]] )\n\nfull_distances_mlbp = np.array(full_distances_mlbp)\nprint(\"full_distances_mlbp\", full_distances_mlbp.shape)\n\n###################################################################################################################\n### distances from mega ###########################################################\n###################################################################################################################\nmega_dist_csv = pd.read_csv(csv_mega) \nmega_dist_csv = mega_dist_csv.set_index(mega_dist_csv.columns[0])\nDIST_mega = mega_dist_csv.values\nDIST_mega[np.isnan(DIST_mega)] = 0 # lllenamos con ceros los valores nan\nDIST_mega = DIST_mega + DIST_mega.T #copiamos el triangulo inferior al superir en la matriz\ndistances_mega = DIST_mega[0,1:DIST_mega.shape[0]]\n\ndistances_mega = (distances_mega - np.min(distances_mega)) / (np.max(distances_mega) - np.min(distances_mega))\n###################################################################################################################\n###################################################################################################################\n\nnames_temp = np.array(sequences)\nnames_temp = names_temp[1:names_temp.shape[0]] # eliminamos el primer elemento\n\n###################################################################################################################3\n# procesamos las distancias con FOS\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', 
fontsize=6)\naxs[2,1].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_fos.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con GLCM\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_glcm.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con LBP\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, 
full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_lbp.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# procesamos las distancias con MLBP\n###################################################################################################################\nplt.clf()\nfig, axs = plt.subplots(3,2)\naxs[0,0].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\naxs[2,0].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[2,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,0].legend(loc='upper right', fontsize=6)\naxs[2,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[2,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[2,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_mlbp.png\", dpi = 200, bbox_inches='tight')\n\n\ndata_csv = []\nerror_fos = [] # save the error for each mappoing function with FOS\nerror_glcm = [] # save the error for each mappoing function with GLCM\nerror_lbp = [] # save the error for each mappoing function with LBP\nerror_mlbp = [] # save the error for each mappoing function with MLBP\nfor mapping_type in range(mapping_function_size):\n error_fos.append((np.sum((full_distances_fos[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_glcm.append((np.sum((full_distances_glcm[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_lbp.append((np.sum((full_distances_lbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n error_mlbp.append((np.sum((full_distances_mlbp[mapping_type] - distances_mega)**2))/distances_mega.shape[0])\n\ndata_csv.append(error_fos)\ndata_csv.append(error_glcm)\ndata_csv.append(error_lbp)\ndata_csv.append(error_mlbp)\n\ndata_csv = np.array(data_csv)\ndf = 
pd.DataFrame(data=data_csv.T, index=[\"map0\", \"map1\", \"map2\", \"map3\", \"map4\", \"map5\"], columns=[\"FOS\", \"GLCM\", \"LBP\", \"MLBP\"])\nprint(df)\ndf.to_csv(results_file + \".csv\", index=True)\n#print(error_fos)\n#print(error_glcm)\n#print(error_lbp)\n#print(error_mlbp)\n\n\n\n###################################################################################################################\n# proccesing a MAPPING 0 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[0], 'b--', label='FOS-MAP0')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[0], 'b--', label='GLCM-MAP0')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[0], 'b--', label='LBP-MAP0')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[0], 'b--', label='MLBP-MAP0')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_0map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 1 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[1], 'b--', label='FOS-MAP1')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[1], 'b--', label='GLCM-MAP1')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[1], 'b--', label='LBP-MAP1')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[1], 'b--', label='MLBP-MAP1')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_1map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 2 funciton with the all 
algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[2], 'b--', label='FOS-MAP2')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[2], 'b--', label='GLCM-MAP2')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[2], 'b--', label='LBP-MAP2')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[2], 'b--', label='MLBP-MAP2')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_2map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 3 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[3], 'b--', label='FOS-MAP3')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[3], 'b--', label='GLCM-MAP3')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[3], 'b--', label='LBP-MAP3')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[3], 'b--', label='MLBP-MAP3')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_3map.png\", dpi = 200, bbox_inches='tight')\n\n\n###################################################################################################################\n# proccesing a MAPPING 4 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[4], 'b--', label='FOS-MAP4')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[4], 'b--', label='GLCM-MAP4')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', 
fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[4], 'b--', label='LBP-MAP4')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[4], 'b--', label='MLBP-MAP4')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_4map.png\", dpi = 200, bbox_inches='tight')\n\n###################################################################################################################\n# proccesing a MAPPING 5 funciton with the all algorithms\n###################################################################################################################\n\nplt.clf()\nfig, axs = plt.subplots(2,2)\naxs[0,0].plot(names_temp, full_distances_fos[5], 'b--', label='FOS-MAP5')\naxs[0,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,0].legend(loc='upper right', fontsize=6)\naxs[0,1].plot(names_temp, full_distances_glcm[5], 'b--', label='GLCM-MAP5')\naxs[0,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[0,1].legend(loc='upper right', fontsize=6)\naxs[1,0].plot(names_temp, full_distances_lbp[5], 'b--', label='LBP-MAP5')\naxs[1,0].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,0].legend(loc='upper right', fontsize=6)\naxs[1,1].plot(names_temp, full_distances_mlbp[5], 'b--', label='MLBP-MAP5')\naxs[1,1].plot(names_temp, distances_mega, 'r-.', label='CLUSTALW')\naxs[1,1].legend(loc='upper right', fontsize=6)\n\nfor ax in axs.flat:\n ax.label_outer()\n ax.yaxis.set_tick_params(labelsize=6)\n plt.sca(ax)\n plt.xticks(rotation=45, horizontalalignment='right', fontweight='light', fontsize=6 )\n plt.xlabel('Sequences', fontsize=6)\n\nfig.text(0.04, 0.5, 'Distances', va='center', rotation='vertical', fontsize=6 )\nplt.savefig( results_file + \"_5map.png\", dpi = 200, bbox_inches='tight')", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
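The four distance blocks in the genome feature-comparison script above (FOS, GLCM, LBP, MLBP) repeat the same computation on different feature arrays: build a symmetric Euclidean distance matrix per mapping function, min-max normalise it, and keep its first row, i.e. the distances from the reference sequence to every other sequence. A minimal NumPy sketch of that shared step; the helper name reference_distances is mine, and it assumes a feature array shaped (n_sequences, n_mappings, n_feature_values) such as data_features_fos in the script:

import numpy as np

def reference_distances(features, mapping_type):
    # One feature vector per sequence for the chosen mapping function.
    vectors = features[:, mapping_type, :]
    # Pairwise Euclidean distances in a single broadcasted step.
    diff = vectors[:, None, :] - vectors[None, :, :]
    dist = np.sqrt((diff ** 2).sum(axis=-1))
    # Min-max normalisation over the whole matrix, as in the script.
    dist = (dist - dist.min()) / (dist.max() - dist.min())
    # Distances from the first (reference) sequence to all the others.
    return dist[0, 1:]

# Mean squared error against the CLUSTALW/MEGA distances, mirroring error_fos:
# mse_map0 = np.mean((reference_distances(data_features_fos, 0) - distances_mega) ** 2)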
""" Physical units and dimensions """ from sympy import * from sympy.core.basic import Atom from sympy.core.methods import ArithMeths, RelMeths class Unit(Atom, RelMeths, ArithMeths): is_positive = True # make (m**2)**Rational(1,2) --> m is_commutative = True def __init__(self, name, abbrev): self.name = name self.abbrev = abbrev def tostr(self, level=0): return self.abbrev def __eq__(self, other): return isinstance(other, Unit) and self.name == other.name def defunit(value, *names): u = value g = globals() for name in names: g[name] = u # Dimensionless percent = percents = Rational(1,100) permille = permille = Rational(1,1000) ten = Rational(10) yotta = ten**24 zetta = ten**21 exa = ten**18 peta = ten**15 tera = ten**12 giga = ten**9 mega = ten**6 kilo = ten**3 deca = ten**1 deci = ten**-1 centi = ten**-2 milli = ten**-3 micro = ten**-6 nano = ten**-9 pico = ten**-12 femto = ten**-15 atto = ten**-18 zepto = ten**-21 yocto = ten**-24 rad = radian = radians = 1 deg = degree = degrees = pi/180 # Base units defunit(Unit('meter', 'm'), 'm', 'meter', 'meters') defunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms') defunit(Unit('second', 's'), 's', 'second', 'seconds') defunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes') defunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins') defunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles') defunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas') # Derived units defunit(1/s, 'Hz', 'hz', 'hertz') defunit(m*kg/s**2, 'N', 'newton', 'newtons') defunit(N*m, 'J', 'joule', 'joules') defunit(J/s, 'W', 'watt', 'watts') defunit(N/m**2, 'Pa', 'pa', 'pascal', 'pascals') defunit(s*A, 'C', 'coulomb', 'coulombs') defunit(W/A, 'v', 'V', 'volt', 'volts') defunit(V/A, 'ohm', 'ohms') defunit(A/V, 'S', 'siemens', 'mho', 'mhos') defunit(C/V, 'F', 'farad', 'farads') defunit(J/A, 'Wb', 'wb', 'weber', 'webers') defunit(V*s/m**2, 'T', 'tesla', 'teslas') defunit(V*s/A, 'H', 'henry', 'henrys') # Common length units defunit(kilo*m, 'km', 'kilometer', 'kilometers') defunit(deci*m, 'dm', 'decimeter', 'decimeters') defunit(centi*m, 'cm', 'centimeter', 'centimeters') defunit(milli*m, 'mm', 'millimeter', 'millimeters') defunit(micro*m, 'um', 'micrometer', 'micrometers', 'micron', 'microns') defunit(nano*m, 'nm', 'nanometer', 'nanometers') defunit(pico*m, 'pm', 'picometer', 'picometers') defunit(Rational('0.3048')*m, 'ft', 'foot', 'feet') defunit(Rational('25.4')*mm, 'inch', 'inches') defunit(3*ft, 'yd', 'yard', 'yards') defunit(5280*ft, 'mi', 'mile', 'miles') # Common volume and area units defunit(m**3 / 1000, 'l', 'liter', 'liters') defunit(deci*l, 'dl', 'deciliter', 'deciliters') defunit(centi*l, 'cl', 'centiliter', 'centiliters') defunit(milli*l, 'ml', 'milliliter', 'milliliters') # Common time units defunit(milli*s, 'ms', 'millisecond', 'milliseconds') defunit(micro*s, 'us', 'microsecond', 'microseconds') defunit(nano*s, 'ns', 'nanosecond', 'nanoseconds') defunit(pico*s, 'ps', 'picosecond', 'picoseconds') defunit(60*s, 'minute', 'minutes') defunit(60*minute, 'h', 'hour', 'hours') defunit(24*hour, 'day', 'days') defunit(Rational('31558149.540')*s, 'sidereal_year', 'sidereal_years') defunit(Rational('365.24219')*day, 'tropical_year', 'tropical_years') defunit(Rational('365')*day, 'common_year', 'common_years') defunit(Rational('365.25')*day, 'julian_year', 'julian_years') year = years = tropical_year # Common mass units defunit(kilogram / kilo, 'g', 'gram', 'grams') defunit(milli * g, 'mg', 'milligram', 'milligrams') defunit(micro * g, 'ug', 'microgram', 'micrograms') 
#----------------------------------------------------------------------------
# Physical constants
#

c = speed_of_light = 299792458 * m/s
G = gravitational_constant = Rational('6.67428') * ten**-11 * m**3 / kg / s**2
u0 = magnetic_constant = 4*pi * ten**-7 * N/A**2
e0 = electric_constant = 1/(u0 * c**2)
Z0 = vacuum_impedance = u0 * c

planck = Rational('6.62606896') * ten**-34 * J*s
hbar = planck / (2*pi)

avogadro = (Rational('6.02214179') * 10**23) / mol
boltzmann = Rational('1.3806505') * ten**-23 * J / K

gee = gees = Rational('9.80665') * m/s**2
atmosphere = atmospheres = atm = 101325 * pascal


# Other convenient units and magnitudes

defunit(c*julian_year, 'ly', 'lightyear', 'lightyears')
defunit(149597870691*m, 'au', 'astronomical_unit', 'astronomical_units')
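Since every derived unit above is just a SymPy expression in the seven base units, ordinary arithmetic doubles as unit conversion. A small usage sketch, assuming the module above is saved as units.py and imported under the older SymPy release it targets (it imports sympy.core.methods, which later releases removed):

# Illustrative usage only; the file name units.py is an assumption.
from units import m, s, kg, km, hour, N, J

speed = 90 * km / hour        # 90 km/h rewritten in base units
print(speed)                  # 25*m/s, because km = 1000*m and hour = 3600*s

force = 5 * kg * m / s**2
print(force / N)              # 5, since N was defined as m*kg/s**2

work = force * 2 * m          # force applied over two metres
print(work / J)               # 10, since J was defined as N*m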
normal
{ "blob_id": "c0e1c0c4545777a669fac19900239ab9baade242", "index": 5993, "step-1": "<mask token>\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\ndef defunit(value, *names):\n u = value\n g = globals()\n for name in names:\n g[name] = u\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\ndef defunit(value, *names):\n u = value\n g = globals()\n for name in names:\n g[name] = u\n\n\n<mask token>\ndefunit(Unit('meter', 'm'), 'm', 'meter', 'meters')\ndefunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')\ndefunit(Unit('second', 's'), 's', 'second', 'seconds')\ndefunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')\ndefunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')\ndefunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')\ndefunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')\ndefunit(1 / s, 'Hz', 'hz', 'hertz')\ndefunit(m * kg / s ** 2, 'N', 'newton', 'newtons')\ndefunit(N * m, 'J', 'joule', 'joules')\ndefunit(J / s, 'W', 'watt', 'watts')\ndefunit(N / m ** 2, 'Pa', 'pa', 'pascal', 'pascals')\ndefunit(s * A, 'C', 'coulomb', 'coulombs')\ndefunit(W / A, 'v', 'V', 'volt', 'volts')\ndefunit(V / A, 'ohm', 'ohms')\ndefunit(A / V, 'S', 'siemens', 'mho', 'mhos')\ndefunit(C / V, 'F', 'farad', 'farads')\ndefunit(J / A, 'Wb', 'wb', 'weber', 'webers')\ndefunit(V * s / m ** 2, 'T', 'tesla', 'teslas')\ndefunit(V * s / A, 'H', 'henry', 'henrys')\ndefunit(kilo * m, 'km', 'kilometer', 'kilometers')\ndefunit(deci * m, 'dm', 'decimeter', 'decimeters')\ndefunit(centi * m, 'cm', 'centimeter', 'centimeters')\ndefunit(milli * m, 'mm', 'millimeter', 'millimeters')\ndefunit(micro * m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')\ndefunit(nano * m, 'nm', 'nanometer', 'nanometers')\ndefunit(pico * m, 'pm', 'picometer', 'picometers')\ndefunit(Rational('0.3048') * m, 'ft', 'foot', 'feet')\ndefunit(Rational('25.4') * mm, 'inch', 'inches')\ndefunit(3 * ft, 'yd', 'yard', 'yards')\ndefunit(5280 * ft, 'mi', 'mile', 'miles')\ndefunit(m ** 3 / 1000, 'l', 'liter', 'liters')\ndefunit(deci * l, 'dl', 'deciliter', 'deciliters')\ndefunit(centi * l, 'cl', 'centiliter', 'centiliters')\ndefunit(milli * l, 'ml', 'milliliter', 'milliliters')\ndefunit(milli * s, 'ms', 'millisecond', 'milliseconds')\ndefunit(micro * s, 'us', 'microsecond', 'microseconds')\ndefunit(nano * s, 'ns', 'nanosecond', 'nanoseconds')\ndefunit(pico * s, 'ps', 'picosecond', 'picoseconds')\ndefunit(60 * s, 'minute', 'minutes')\ndefunit(60 * minute, 'h', 'hour', 'hours')\ndefunit(24 * hour, 'day', 'days')\ndefunit(Rational('31558149.540') * s, 'sidereal_year', 'sidereal_years')\ndefunit(Rational('365.24219') * day, 
'tropical_year', 'tropical_years')\ndefunit(Rational('365') * day, 'common_year', 'common_years')\ndefunit(Rational('365.25') * day, 'julian_year', 'julian_years')\n<mask token>\ndefunit(kilogram / kilo, 'g', 'gram', 'grams')\ndefunit(milli * g, 'mg', 'milligram', 'milligrams')\ndefunit(micro * g, 'ug', 'microgram', 'micrograms')\n<mask token>\ndefunit(c * julian_year, 'ly', 'lightyear', 'lightyears')\ndefunit(149597870691 * m, 'au', 'astronomical_unit', 'astronomical_units')\n", "step-4": "<mask token>\nfrom sympy import *\nfrom sympy.core.basic import Atom\nfrom sympy.core.methods import ArithMeths, RelMeths\n\n\nclass Unit(Atom, RelMeths, ArithMeths):\n is_positive = True\n is_commutative = True\n\n def __init__(self, name, abbrev):\n self.name = name\n self.abbrev = abbrev\n\n def tostr(self, level=0):\n return self.abbrev\n\n def __eq__(self, other):\n return isinstance(other, Unit) and self.name == other.name\n\n\ndef defunit(value, *names):\n u = value\n g = globals()\n for name in names:\n g[name] = u\n\n\npercent = percents = Rational(1, 100)\npermille = permille = Rational(1, 1000)\nten = Rational(10)\nyotta = ten ** 24\nzetta = ten ** 21\nexa = ten ** 18\npeta = ten ** 15\ntera = ten ** 12\ngiga = ten ** 9\nmega = ten ** 6\nkilo = ten ** 3\ndeca = ten ** 1\ndeci = ten ** -1\ncenti = ten ** -2\nmilli = ten ** -3\nmicro = ten ** -6\nnano = ten ** -9\npico = ten ** -12\nfemto = ten ** -15\natto = ten ** -18\nzepto = ten ** -21\nyocto = ten ** -24\nrad = radian = radians = 1\ndeg = degree = degrees = pi / 180\ndefunit(Unit('meter', 'm'), 'm', 'meter', 'meters')\ndefunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')\ndefunit(Unit('second', 's'), 's', 'second', 'seconds')\ndefunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')\ndefunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')\ndefunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')\ndefunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')\ndefunit(1 / s, 'Hz', 'hz', 'hertz')\ndefunit(m * kg / s ** 2, 'N', 'newton', 'newtons')\ndefunit(N * m, 'J', 'joule', 'joules')\ndefunit(J / s, 'W', 'watt', 'watts')\ndefunit(N / m ** 2, 'Pa', 'pa', 'pascal', 'pascals')\ndefunit(s * A, 'C', 'coulomb', 'coulombs')\ndefunit(W / A, 'v', 'V', 'volt', 'volts')\ndefunit(V / A, 'ohm', 'ohms')\ndefunit(A / V, 'S', 'siemens', 'mho', 'mhos')\ndefunit(C / V, 'F', 'farad', 'farads')\ndefunit(J / A, 'Wb', 'wb', 'weber', 'webers')\ndefunit(V * s / m ** 2, 'T', 'tesla', 'teslas')\ndefunit(V * s / A, 'H', 'henry', 'henrys')\ndefunit(kilo * m, 'km', 'kilometer', 'kilometers')\ndefunit(deci * m, 'dm', 'decimeter', 'decimeters')\ndefunit(centi * m, 'cm', 'centimeter', 'centimeters')\ndefunit(milli * m, 'mm', 'millimeter', 'millimeters')\ndefunit(micro * m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')\ndefunit(nano * m, 'nm', 'nanometer', 'nanometers')\ndefunit(pico * m, 'pm', 'picometer', 'picometers')\ndefunit(Rational('0.3048') * m, 'ft', 'foot', 'feet')\ndefunit(Rational('25.4') * mm, 'inch', 'inches')\ndefunit(3 * ft, 'yd', 'yard', 'yards')\ndefunit(5280 * ft, 'mi', 'mile', 'miles')\ndefunit(m ** 3 / 1000, 'l', 'liter', 'liters')\ndefunit(deci * l, 'dl', 'deciliter', 'deciliters')\ndefunit(centi * l, 'cl', 'centiliter', 'centiliters')\ndefunit(milli * l, 'ml', 'milliliter', 'milliliters')\ndefunit(milli * s, 'ms', 'millisecond', 'milliseconds')\ndefunit(micro * s, 'us', 'microsecond', 'microseconds')\ndefunit(nano * s, 'ns', 'nanosecond', 'nanoseconds')\ndefunit(pico * s, 'ps', 'picosecond', 'picoseconds')\ndefunit(60 * s, 'minute', 
'minutes')\ndefunit(60 * minute, 'h', 'hour', 'hours')\ndefunit(24 * hour, 'day', 'days')\ndefunit(Rational('31558149.540') * s, 'sidereal_year', 'sidereal_years')\ndefunit(Rational('365.24219') * day, 'tropical_year', 'tropical_years')\ndefunit(Rational('365') * day, 'common_year', 'common_years')\ndefunit(Rational('365.25') * day, 'julian_year', 'julian_years')\nyear = years = tropical_year\ndefunit(kilogram / kilo, 'g', 'gram', 'grams')\ndefunit(milli * g, 'mg', 'milligram', 'milligrams')\ndefunit(micro * g, 'ug', 'microgram', 'micrograms')\nc = speed_of_light = 299792458 * m / s\nG = gravitational_constant = Rational('6.67428'\n ) * ten ** -11 * m ** 3 / kg / s ** 2\nu0 = magnetic_constant = 4 * pi * ten ** -7 * N / A ** 2\ne0 = electric_constant = 1 / (u0 * c ** 2)\nZ0 = vacuum_impedance = u0 * c\nplanck = Rational('6.2606896') * ten ** -34 * J * s\nhbar = planck / (2 * pi)\navogadro = Rational('6.02214179') * 10 ** 23 / mol\nboltzmann = Rational('1.3806505') * ten ** -23 * J / K\ngee = gees = Rational('9.80665') * m / s ** 2\natmosphere = atmospheres = atm = 101325 * pascal\ndefunit(c * julian_year, 'ly', 'lightyear', 'lightyears')\ndefunit(149597870691 * m, 'au', 'astronomical_unit', 'astronomical_units')\n", "step-5": "\"\"\"\r\nPhysical units and dimensions\r\n\r\n\"\"\"\r\n\r\nfrom sympy import *\r\nfrom sympy.core.basic import Atom\r\nfrom sympy.core.methods import ArithMeths, RelMeths\r\n\r\n\r\nclass Unit(Atom, RelMeths, ArithMeths):\r\n is_positive = True # make (m**2)**Rational(1,2) --> m\r\n is_commutative = True\r\n\r\n def __init__(self, name, abbrev):\r\n self.name = name\r\n self.abbrev = abbrev\r\n\r\n def tostr(self, level=0):\r\n return self.abbrev\r\n\r\n def __eq__(self, other):\r\n return isinstance(other, Unit) and self.name == other.name\r\n\r\n\r\ndef defunit(value, *names):\r\n u = value\r\n g = globals()\r\n for name in names:\r\n g[name] = u\r\n\r\n\r\n# Dimensionless\r\n\r\npercent = percents = Rational(1,100)\r\npermille = permille = Rational(1,1000)\r\n\r\nten = Rational(10)\r\n\r\nyotta = ten**24\r\nzetta = ten**21\r\nexa = ten**18\r\npeta = ten**15\r\ntera = ten**12\r\ngiga = ten**9\r\nmega = ten**6\r\nkilo = ten**3\r\ndeca = ten**1\r\ndeci = ten**-1\r\ncenti = ten**-2\r\nmilli = ten**-3\r\nmicro = ten**-6\r\nnano = ten**-9\r\npico = ten**-12\r\nfemto = ten**-15\r\natto = ten**-18\r\nzepto = ten**-21\r\nyocto = ten**-24\r\n\r\nrad = radian = radians = 1\r\ndeg = degree = degrees = pi/180\r\n\r\n\r\n# Base units\r\n\r\ndefunit(Unit('meter', 'm'), 'm', 'meter', 'meters')\r\ndefunit(Unit('kilogram', 'kg'), 'kg', 'kilogram', 'kilograms')\r\ndefunit(Unit('second', 's'), 's', 'second', 'seconds')\r\ndefunit(Unit('ampere', 'A'), 'A', 'ampere', 'amperes')\r\ndefunit(Unit('kelvin', 'K'), 'K', 'kelvin', 'kelvins')\r\ndefunit(Unit('mole', 'mol'), 'mol', 'mole', 'moles')\r\ndefunit(Unit('candela', 'cd'), 'cd', 'candela', 'candelas')\r\n\r\n\r\n# Derived units\r\n\r\ndefunit(1/s, 'Hz', 'hz', 'hertz')\r\ndefunit(m*kg/s**2, 'N', 'newton', 'newtons')\r\ndefunit(N*m, 'J', 'joule', 'joules')\r\ndefunit(J/s, 'W', 'watt', 'watts')\r\ndefunit(N/m**2, 'Pa', 'pa', 'pascal', 'pascals')\r\ndefunit(s*A, 'C', 'coulomb', 'coulombs')\r\ndefunit(W/A, 'v', 'V', 'volt', 'volts')\r\ndefunit(V/A, 'ohm', 'ohms')\r\ndefunit(A/V, 'S', 'siemens', 'mho', 'mhos')\r\ndefunit(C/V, 'F', 'farad', 'farads')\r\ndefunit(J/A, 'Wb', 'wb', 'weber', 'webers')\r\ndefunit(V*s/m**2, 'T', 'tesla', 'teslas')\r\ndefunit(V*s/A, 'H', 'henry', 'henrys')\r\n\r\n\r\n# Common length units\r\n\r\ndefunit(kilo*m, 
'km', 'kilometer', 'kilometers')\r\ndefunit(deci*m, 'dm', 'decimeter', 'decimeters')\r\ndefunit(centi*m, 'cm', 'centimeter', 'centimeters')\r\ndefunit(milli*m, 'mm', 'millimeter', 'millimeters')\r\ndefunit(micro*m, 'um', 'micrometer', 'micrometers', 'micron', 'microns')\r\ndefunit(nano*m, 'nm', 'nanometer', 'nanometers')\r\ndefunit(pico*m, 'pm', 'picometer', 'picometers')\r\n\r\ndefunit(Rational('0.3048')*m, 'ft', 'foot', 'feet')\r\ndefunit(Rational('25.4')*mm, 'inch', 'inches')\r\ndefunit(3*ft, 'yd', 'yard', 'yards')\r\ndefunit(5280*ft, 'mi', 'mile', 'miles')\r\n\r\n\r\n# Common volume and area units\r\n\r\ndefunit(m**3 / 1000, 'l', 'liter', 'liters')\r\ndefunit(deci*l, 'dl', 'deciliter', 'deciliters')\r\ndefunit(centi*l, 'cl', 'centiliter', 'centiliters')\r\ndefunit(milli*l, 'ml', 'milliliter', 'milliliters')\r\n\r\n\r\n# Common time units\r\n\r\ndefunit(milli*s, 'ms', 'millisecond', 'milliseconds')\r\ndefunit(micro*s, 'us', 'microsecond', 'microseconds')\r\ndefunit(nano*s, 'ns', 'nanosecond', 'nanoseconds')\r\ndefunit(pico*s, 'ps', 'picosecond', 'picoseconds')\r\n\r\ndefunit(60*s, 'minute', 'minutes')\r\ndefunit(60*minute, 'h', 'hour', 'hours')\r\ndefunit(24*hour, 'day', 'days')\r\n\r\ndefunit(Rational('31558149.540')*s, 'sidereal_year', 'sidereal_years')\r\ndefunit(Rational('365.24219')*day, 'tropical_year', 'tropical_years')\r\ndefunit(Rational('365')*day, 'common_year', 'common_years')\r\ndefunit(Rational('365.25')*day, 'julian_year', 'julian_years')\r\n\r\nyear = years = tropical_year\r\n\r\n\r\n# Common mass units\r\n\r\ndefunit(kilogram / kilo, 'g', 'gram', 'grams')\r\ndefunit(milli * g, 'mg', 'milligram', 'milligrams')\r\ndefunit(micro * g, 'ug', 'microgram', 'micrograms')\r\n\r\n\r\n\r\n#----------------------------------------------------------------------------\r\n# Physical constants\r\n#\r\n\r\nc = speed_of_light = 299792458 * m/s\r\nG = gravitational_constant = Rational('6.67428') * ten**-11 * m**3 / kg / s**2\r\nu0 = magnetic_constant = 4*pi * ten**-7 * N/A**2\r\ne0 = electric_constant = 1/(u0 * c**2)\r\nZ0 = vacuum_impedance = u0 * c\r\n\r\nplanck = Rational('6.2606896') * ten**-34 * J*s\r\nhbar = planck / (2*pi)\r\n\r\navogadro = (Rational('6.02214179') * 10**23) / mol\r\nboltzmann = Rational('1.3806505') * ten**-23 * J / K\r\n\r\ngee = gees = Rational('9.80665') * m/s**2\r\natmosphere = atmospheres = atm = 101325 * pascal\r\n\r\n\r\n# Other convenient units and magnitudes\r\n\r\ndefunit(c*julian_year, 'ly', 'lightyear', 'lightyears')\r\ndefunit(149597870691*m, 'au', 'astronomical_unit', 'astronomical_units')\r\n", "step-ids": [ 5, 6, 7, 9, 10 ] }
[ 5, 6, 7, 9, 10 ]
from random import randrange import random """ both user and computer funcs: """ def check_ok(boat, taken_positions): # input: boat, taken_positions # this func checks if the boat outside the playground or the position of the boat is already in taken_position # return: boat. boat will returned as [-1] or its specific position boat.sort() for i in range(len(boat)): if boat[i] in taken_positions: #this condition checks if the block boat[i] is already in the list taken_positions boat = [-1] break elif boat[i] > 99 or boat[i] < 0: #this condition checks border 1 and 3 boat = [-1] break elif boat[i] % 10 == 9 and i < len(boat) - 1: #this condition checks border 2 and 4 if boat[i + 1] % 10 == 0: boat = [-1] break if i != 0: # this condition checks if there is any hole in the boat if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10: boat = [-1] break return boat def check_shot(shot, ships, hit, miss, comp, sinked_boats): # input: shot, all the boats (ships), hit, miss, comp, sinked_boats # this func initially assumes that the shot is missed (cond = 0) # given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships # if yes, remove the block of the boat that is hitted by the shot # append the shot to hit or comp. If comp, sinked_boats += 1 # if not, append the shot to miss # return: all the boats (ships), hit, miss, comp, cond, sinked_boats cond = 0 # miss for i in range(len(ships)): if shot in ships[i]: ships[i].remove(shot) if len(ships[i]) > 0: hit.append(shot) cond = 1 # hit else: comp.append(shot) cond = 2 # comp sinked_boats += 1 if cond == 0: # miss miss.append(shot) return ships, hit, miss, comp, cond, sinked_boats def create_playground(hit, miss, comp): # input: hit, miss, comp # this func creates the playground with the status of each block # print the playground print(" battleship") print(" 0 1 2 3 4 5 6 7 8 9") block = 0 #this variable keep track of the spot of the block for i in range(10): #create each row row = "" for j in range(10): #create each spot on the specific row character = "_ " if block in miss: character = "x " elif block in hit: character = "o " elif block in comp: character = "Q " row += character block += 1 #the block var increments 1 after each character is add to row print(i, " ", row) print("") def check_empty(ships): # input: ships # [] = False, [#have element] = True # this func checks each ship in the 2D list ships # if ship is empty, return True, and vice versa # if all ships are empty, return True, else return False # return True or False return all([not elem for elem in ships]) """ user - 2 funcs: """ def create_ships_u(taken_positions, num_boats): # input: num_boats # this func has a loop that makes all boats, # which calls the get_ship(len_of_boat, taken_positions) that creates a single boat # return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list contains the positions of all boats for len_of_boat in num_boats: ship, taken_positions = get_ship(len_of_boat, taken_positions) ships.append(ship) return ships, taken_positions def create_playground_u(taken_positions): print(" battleships ") print(" 0 1 2 3 4 5 6 7 8 9") place = 0 for x in range(10): row = "" for y in range(10): ch = " _ " if place in taken_positions: ch = " o " row = row + ch place = place + 1 print(x," ",row) def get_ship(len_of_boat, taken_positions): # input: len_of_boat, taken_positions # this func gets the boat's position from the user's input # this func checks 
both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order # return a valid ship while True: ship = [] print("enter your ship of length", len_of_boat) for i in range(len_of_boat): while True: try: boat_num = input("please enter a number: ") ship.append(int(boat_num)) except ValueError: # better try again... Return to the start of the loop print("wrong type of input") continue else: # is is a correct input, and we're ready to exit the loop break ship = check_ok(ship, taken_positions) if -1 not in ship: # check if a ship is valid. If yes, add the ship to taken_positions and break taken_positions += ship break else: print("invalid number - please enter again") return ship, taken_positions def get_shot_user(guesses): # input: guesses is the combined list of hit, miss, comp # this funcs asks the user to enter the shot, then checks the validity of the shot # return: the valid shot while True: try: shot = int(input("Enter your shot: ")) if shot < 0 or shot > 99: shot = int(input("Enter your shot:")) elif shot in guesses: print("already guessed - please enter again") else: return shot except: print("incorrect - please enter integer only") """ computer - 1 funcs: """ def create_ships_c(taken_positions, num_boats): # input: num_boats # this funcs has a loop that makes all boats, # which calls the create_boat() that creates a single boat # return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats ships = [] #this is a 2D list contains the positions of all boats for len_of_boat in num_boats: boat_position = [-1] #create the initial position of every boat is [-1] while -1 in boat_position: boat_start = randrange(99) #boat starting point boat_direction = randrange(1, 4) #{1: "up", 2: "right", 3: "down", 4: "left"} boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat #a new boat is created after finishing the while loop ships.append(boat_position) taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions return ships, taken_positions def create_boat(len_of_boat, boat_start, boat_direction, taken_positions): # input: len_of_boat, boat_start, boat_direction, taken_positions # this func initializes boat = [] # with len_of_boat, boat_start, boat_direction, this func create the position of the boat # calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position # return: boat. 
boat will returned as [-1] or its specific position boat = [] if boat_direction == 1: for i in range(len_of_boat): boat.append(boat_start - i * 10) # already have the position of boat after this line boat = check_ok(boat, taken_positions) elif boat_direction == 2: for i in range(len_of_boat): boat.append(boat_start + i) boat = check_ok(boat, taken_positions) elif boat_direction == 3: for i in range(len_of_boat): boat.append(boat_start + i * 10) boat = check_ok(boat, taken_positions) elif boat_direction == 4: for i in range(len_of_boat): boat.append(boat_start - i) boat = check_ok(boat, taken_positions) return boat def get_shot_comp(guesses, tactics): # input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot) # in the first mơve, tactics = [] # this func checks if len(tactics) > 0 # if yes, pick shot = tactics[0] # if no, pick shot = randrange(99) # this func check if shot not in guesses(which is the list of all moves) # if yes, guess.append(shot), and break # return: the valid shot, guesses while True: try: if len(tactics) > 0: shot = tactics[0] else: shot = randrange(99) if shot not in guesses: guesses.append(shot) break except: print("incorrect - please enter integer only") return shot, guesses def calculate_tactics(shot, tactics, guesses, hit): # input: shot, tactics, guesses, hit # this function takes the newly shot, and changes the tactics list accordingly # the list temp is the possible positions that the next shot can be # if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot # else, the list temp will be created based on the last 2 shots # candidate is the list of valid possible shots that is created from temp # shuffle the order of elements inside candidate # return: candidate (candidate is tactics) temp = [] if len(tactics) < 1: # got 1 hit the first time temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be else: # got at least 2 hits # checks to see if the 4 spots around is in hit if shot - 1 in hit: # east temp = [shot + 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot - num not in hit: temp.append(shot - num) break elif shot + 1 in hit: # west temp = [shot - 1] for num in [2, 3, 4, 5, 6, 7, 8]: if shot + num not in hit: temp.append(shot + num) break elif shot - 10 in hit: # south temp = [shot + 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot - num not in hit: temp.append(shot - num) break elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40 temp = [shot - 10] for num in [20, 30, 40, 50, 60, 70, 80]: if shot + num not in hit: temp.append(shot + num) break candidate = [] # list of valid places that the next shot could be for i in range(len(temp)): if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp candidate.append(temp[i]) random.shuffle(candidate) # shuffle the element order of the list candidate return candidate """ main program: """ num_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. Each boat is represented by its length # before game # computer - 1 hit1 = [] miss1 = [] comp1 = [] guesses1 = [] cond1 = 0 tactics1 = [] # list of possible moves after a boat is hitted. 
After a boat is sunked, tactics reset to [] taken_positions1 = [] sinked_boats1 = [] # user - 2 hit2 = [] miss2 = [] comp2 = [] guesses2 = [] cond2 = 0 tactics2 = [] taken_positions2 = [] sinked_boats2 = [] # computer creates ships for player 1 ships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) # user creates boat for player 2 - show board ships2, taken_positions2 = create_ships_u(taken_positions2, num_boats) create_playground_u(taken_positions2) # loop for user and computer takes turn to shoot, and repeat until finding a winner: turns = 0 while True: turns += 1 # USER SHOOTS: using 1 because it is checking the data of computer guesses1 = hit1 + miss1 + comp1 shot1 = get_shot_user(guesses1) ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1) create_playground(hit1, miss1, comp1) # check if all of the computer ships are empty: if check_empty(ships1): print("end of game - winner in", turns) break # COMPUTER SHOOTS: guesses2 = hit2 + miss2 + comp2 shot2, guesses2 = get_shot_comp(guesses2, tactics2) ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2) create_playground(hit2, miss2, comp2) if cond2 == 1: # got 1 hit tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2) elif cond2 == 2: # comp, and sunk the boat # reset tactics = [] tactics2 = [] elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves # got 1 hit, then miss # remove the newly shot from tactics tactics2.pop(0) # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still [] # check if all of the computer ships are empty: if check_empty(ships2): print("end of game - computer wins in", turns) break # after both the user and computer shoot, start a new loop:
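Both the placement check (check_ok) and the computer's targeting list in calculate_tactics rely on the same convention: the 10x10 board is a flat index 0-99, so -1/+1 steps along a row, -10/+10 steps along a column, and a move from a cell with index % 10 == 9 to one with index % 10 == 0 means a boat wrapped around the right border. A small illustrative sketch of that index arithmetic; the helper names are mine, not part of the program:

def to_row_col(index):
    # Flat 0-99 board index -> (row, column) on the 10x10 grid.
    return index // 10, index % 10

def neighbours(index):
    # The four candidate cells calculate_tactics starts from after a first hit,
    # before filtering out already-guessed or off-board positions.
    return [index - 1, index + 1, index - 10, index + 10]

print(to_row_col(57))                   # (5, 7)
print(neighbours(57))                   # [56, 58, 47, 67]

# Why check_ok rejects a horizontal boat that crosses the right border:
boat = [28, 29, 30]                     # 29 is column 9; 30 is column 0 of the next row
print([to_row_col(b) for b in boat])    # [(2, 8), (2, 9), (3, 0)] -> wraps, so check_ok returns [-1]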
normal
{ "blob_id": "95584dfdb232be7f507dc9d29ed2f1d95fa2b653", "index": 9642, "step-1": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\n<mask token>\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n 
elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n 
cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\n<mask token>\n\n\ndef get_ship(len_of_boat, taken_positions):\n while True:\n ship = []\n print('enter your ship of length', len_of_boat)\n for i in range(len_of_boat):\n while True:\n try:\n boat_num = input('please enter a number: ')\n ship.append(int(boat_num))\n except ValueError:\n print('wrong type of input')\n continue\n else:\n break\n ship = check_ok(ship, taken_positions)\n if -1 not in ship:\n taken_positions += ship\n break\n else:\n print('invalid number - please enter again')\n return ship, taken_positions\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot 
+ num)\n break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef check_ok(boat, taken_positions):\n boat.sort()\n for i in range(len(boat)):\n if boat[i] in taken_positions:\n boat = [-1]\n break\n elif boat[i] > 99 or boat[i] < 0:\n boat = [-1]\n break\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\n if boat[i + 1] % 10 == 0:\n boat = [-1]\n break\n if i != 0:\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\n boat = [-1]\n break\n return boat\n\n\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\n cond = 0\n for i in range(len(ships)):\n if shot in ships[i]:\n ships[i].remove(shot)\n if len(ships[i]) > 0:\n hit.append(shot)\n cond = 1\n else:\n comp.append(shot)\n cond = 2\n sinked_boats += 1\n if cond == 0:\n miss.append(shot)\n return ships, hit, miss, comp, cond, sinked_boats\n\n\ndef create_playground(hit, miss, comp):\n print(' battleship')\n print(' 0 1 2 3 4 5 6 7 8 9')\n block = 0\n for i in range(10):\n row = ''\n for j in range(10):\n character = '_ '\n if block in miss:\n character = 'x '\n elif block in hit:\n character = 'o '\n elif block in comp:\n character = 'Q '\n row += character\n block += 1\n print(i, ' ', row)\n print('')\n\n\ndef check_empty(ships):\n return all([(not elem) for elem in ships])\n\n\n<mask token>\n\n\ndef create_ships_u(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\n ships.append(ship)\n return ships, taken_positions\n\n\ndef create_playground_u(taken_positions):\n print(' battleships ')\n print(' 0 1 2 3 4 5 6 7 8 9')\n place = 0\n for x in range(10):\n row = ''\n for y in range(10):\n ch = ' _ '\n if place in taken_positions:\n ch = ' o '\n row = row + ch\n place = place + 1\n print(x, ' ', row)\n\n\ndef get_ship(len_of_boat, taken_positions):\n while True:\n ship = []\n print('enter your ship of length', len_of_boat)\n for i in range(len_of_boat):\n while True:\n try:\n boat_num = input('please enter a number: ')\n ship.append(int(boat_num))\n except ValueError:\n print('wrong type of input')\n continue\n else:\n break\n ship = check_ok(ship, taken_positions)\n if -1 not in ship:\n taken_positions += ship\n break\n else:\n print('invalid number - please enter again')\n return ship, taken_positions\n\n\ndef get_shot_user(guesses):\n while True:\n try:\n shot = int(input('Enter your shot: '))\n if shot < 0 or shot > 99:\n shot = int(input('Enter your shot:'))\n elif shot in guesses:\n print('already guessed - please enter again')\n else:\n return shot\n except:\n print('incorrect - please enter integer only')\n\n\n<mask token>\n\n\ndef create_ships_c(taken_positions, num_boats):\n ships = []\n for len_of_boat in num_boats:\n boat_position = [-1]\n while -1 in boat_position:\n boat_start = randrange(99)\n boat_direction = randrange(1, 4)\n boat_position = create_boat(len_of_boat, boat_start,\n boat_direction, taken_positions)\n ships.append(boat_position)\n taken_positions += boat_position\n return ships, taken_positions\n\n\ndef 
create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\n boat = []\n if boat_direction == 1:\n for i in range(len_of_boat):\n boat.append(boat_start - i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 2:\n for i in range(len_of_boat):\n boat.append(boat_start + i)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 3:\n for i in range(len_of_boat):\n boat.append(boat_start + i * 10)\n boat = check_ok(boat, taken_positions)\n elif boat_direction == 4:\n for i in range(len_of_boat):\n boat.append(boat_start - i)\n boat = check_ok(boat, taken_positions)\n return boat\n\n\ndef get_shot_comp(guesses, tactics):\n while True:\n try:\n if len(tactics) > 0:\n shot = tactics[0]\n else:\n shot = randrange(99)\n if shot not in guesses:\n guesses.append(shot)\n break\n except:\n print('incorrect - please enter integer only')\n return shot, guesses\n\n\ndef calculate_tactics(shot, tactics, guesses, hit):\n temp = []\n if len(tactics) < 1:\n temp = [shot - 1, shot + 1, shot - 10, shot + 10]\n elif shot - 1 in hit:\n temp = [shot + 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 1 in hit:\n temp = [shot - 1]\n for num in [2, 3, 4, 5, 6, 7, 8]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n elif shot - 10 in hit:\n temp = [shot + 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot - num not in hit:\n temp.append(shot - num)\n break\n elif shot + 10 in hit:\n temp = [shot - 10]\n for num in [20, 30, 40, 50, 60, 70, 80]:\n if shot + num not in hit:\n temp.append(shot + num)\n break\n candidate = []\n for i in range(len(temp)):\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1:\n candidate.append(temp[i])\n random.shuffle(candidate)\n return candidate\n\n\n<mask token>\n", "step-5": "from random import randrange\r\nimport random\r\n\r\n\"\"\"\r\nboth user and computer funcs:\r\n\"\"\"\r\ndef check_ok(boat, taken_positions):\r\n# input: boat, taken_positions \r\n# this func checks if the boat outside the playground or the position of the boat is already in taken_position\r\n# return: boat. boat will returned as [-1] or its specific position\r\n boat.sort()\r\n for i in range(len(boat)):\r\n if boat[i] in taken_positions:\r\n #this condition checks if the block boat[i] is already in the list taken_positions\r\n boat = [-1]\r\n break \r\n elif boat[i] > 99 or boat[i] < 0:\r\n #this condition checks border 1 and 3\r\n boat = [-1]\r\n break\r\n elif boat[i] % 10 == 9 and i < len(boat) - 1:\r\n #this condition checks border 2 and 4\r\n if boat[i + 1] % 10 == 0:\r\n boat = [-1]\r\n break\r\n \r\n if i != 0:\r\n # this condition checks if there is any hole in the boat\r\n if boat[i] != boat[i - 1] + 1 and boat[i] != boat[i - 1] + 10:\r\n boat = [-1]\r\n break\r\n return boat \r\n\r\n\r\ndef check_shot(shot, ships, hit, miss, comp, sinked_boats):\r\n# input: shot, all the boats (ships), hit, miss, comp, sinked_boats\r\n# this func initially assumes that the shot is missed (cond = 0)\r\n# given a shot, this func uses a for-loop that goes through all ships to see if the shot hits one of the ships \r\n# if yes, remove the block of the boat that is hitted by the shot\r\n# append the shot to hit or comp. 
If comp, sinked_boats += 1\r\n# if not, append the shot to miss\r\n# return: all the boats (ships), hit, miss, comp, cond, sinked_boats\r\n cond = 0 # miss\r\n for i in range(len(ships)):\r\n if shot in ships[i]:\r\n ships[i].remove(shot)\r\n if len(ships[i]) > 0:\r\n hit.append(shot)\r\n cond = 1 # hit\r\n else:\r\n comp.append(shot)\r\n cond = 2 # comp\r\n sinked_boats += 1 \r\n if cond == 0: # miss\r\n miss.append(shot) \r\n return ships, hit, miss, comp, cond, sinked_boats\r\n\r\n\r\ndef create_playground(hit, miss, comp):\r\n# input: hit, miss, comp\r\n# this func creates the playground with the status of each block \r\n# print the playground\r\n print(\" battleship\")\r\n print(\" 0 1 2 3 4 5 6 7 8 9\")\r\n \r\n block = 0 #this variable keep track of the spot of the block\r\n for i in range(10):\r\n #create each row\r\n row = \"\"\r\n for j in range(10):\r\n #create each spot on the specific row\r\n character = \"_ \"\r\n if block in miss:\r\n character = \"x \"\r\n elif block in hit:\r\n character = \"o \" \r\n elif block in comp:\r\n character = \"Q \"\r\n row += character\r\n block += 1 #the block var increments 1 after each character is add to row\r\n print(i, \" \", row)\r\n print(\"\")\r\n\r\n\r\ndef check_empty(ships):\r\n# input: ships\r\n# [] = False, [#have element] = True\r\n# this func checks each ship in the 2D list ships\r\n# if ship is empty, return True, and vice versa\r\n# if all ships are empty, return True, else return False\r\n# return True or False \r\n return all([not elem for elem in ships])\r\n\r\n\r\n\"\"\"\r\nuser - 2 funcs:\r\n\"\"\"\r\ndef create_ships_u(taken_positions, num_boats):\r\n# input: num_boats\r\n# this func has a loop that makes all boats,\r\n# which calls the get_ship(len_of_boat, taken_positions) that creates a single boat\r\n# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats\r\n ships = [] #this is a 2D list contains the positions of all boats\r\n for len_of_boat in num_boats:\r\n ship, taken_positions = get_ship(len_of_boat, taken_positions)\r\n ships.append(ship)\r\n return ships, taken_positions\r\n\r\n \r\ndef create_playground_u(taken_positions):\r\n print(\" battleships \")\r\n print(\" 0 1 2 3 4 5 6 7 8 9\")\r\n \r\n place = 0\r\n for x in range(10):\r\n row = \"\"\r\n for y in range(10):\r\n ch = \" _ \"\r\n if place in taken_positions:\r\n ch = \" o \" \r\n row = row + ch\r\n place = place + 1\r\n \r\n print(x,\" \",row)\r\n\r\n\r\ndef get_ship(len_of_boat, taken_positions):\r\n# input: len_of_boat, taken_positions\r\n# this func gets the boat's position from the user's input\r\n# this func checks both the type of the input(is it int) and if the boat is inside playground/in taken_positions/in correct order \r\n# return a valid ship \r\n while True:\r\n ship = []\r\n print(\"enter your ship of length\", len_of_boat)\r\n for i in range(len_of_boat):\r\n while True:\r\n try:\r\n boat_num = input(\"please enter a number: \")\r\n ship.append(int(boat_num))\r\n except ValueError: # better try again... Return to the start of the loop\r\n print(\"wrong type of input\")\r\n continue\r\n else: # is is a correct input, and we're ready to exit the loop\r\n break\r\n ship = check_ok(ship, taken_positions)\r\n\r\n if -1 not in ship: # check if a ship is valid. 
If yes, add the ship to taken_positions and break\r\n taken_positions += ship\r\n break\r\n else:\r\n print(\"invalid number - please enter again\")\r\n return ship, taken_positions\r\n\r\n\r\ndef get_shot_user(guesses):\r\n# input: guesses is the combined list of hit, miss, comp\r\n# this funcs asks the user to enter the shot, then checks the validity of the shot \r\n# return: the valid shot\r\n while True:\r\n try:\r\n shot = int(input(\"Enter your shot: \"))\r\n if shot < 0 or shot > 99:\r\n shot = int(input(\"Enter your shot:\"))\r\n elif shot in guesses:\r\n print(\"already guessed - please enter again\")\r\n else:\r\n return shot\r\n except:\r\n print(\"incorrect - please enter integer only\")\r\n\r\n\r\n\"\"\"\r\ncomputer - 1 funcs:\r\n\"\"\"\r\ndef create_ships_c(taken_positions, num_boats):\r\n# input: num_boats\r\n# this funcs has a loop that makes all boats,\r\n# which calls the create_boat() that creates a single boat\r\n# return: ships, which are the 2D list has len(num_boats) that contains the positions of all boats\r\n ships = [] #this is a 2D list contains the positions of all boats\r\n for len_of_boat in num_boats:\r\n boat_position = [-1] #create the initial position of every boat is [-1]\r\n while -1 in boat_position:\r\n boat_start = randrange(99) #boat starting point\r\n boat_direction = randrange(1, 4) #{1: \"up\", 2: \"right\", 3: \"down\", 4: \"left\"}\r\n boat_position = create_boat(len_of_boat, boat_start, boat_direction, taken_positions) #return the position of boat\r\n #a new boat is created after finishing the while loop\r\n ships.append(boat_position)\r\n taken_positions += boat_position #add all positions of the newly created boat to the list taken_positions\r\n return ships, taken_positions\r\n\r\n\r\ndef create_boat(len_of_boat, boat_start, boat_direction, taken_positions):\r\n# input: len_of_boat, boat_start, boat_direction, taken_positions\r\n# this func initializes boat = []\r\n# with len_of_boat, boat_start, boat_direction, this func create the position of the boat\r\n# calls check_ok(boat, taken_positions) to see if the boat outside playground or the position of the boat is already in taken_position\r\n# return: boat. 
boat will returned as [-1] or its specific position\r\n boat = []\r\n if boat_direction == 1:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start - i * 10) # already have the position of boat after this line\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 2:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start + i)\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 3:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start + i * 10)\r\n boat = check_ok(boat, taken_positions)\r\n elif boat_direction == 4:\r\n for i in range(len_of_boat):\r\n boat.append(boat_start - i)\r\n boat = check_ok(boat, taken_positions)\r\n return boat\r\n\r\n\r\ndef get_shot_comp(guesses, tactics):\r\n# input: guesses (all moves), tactics(which is the list of all valid possible moves for the shot)\r\n# in the first mơve, tactics = []\r\n# this func checks if len(tactics) > 0\r\n# if yes, pick shot = tactics[0]\r\n# if no, pick shot = randrange(99)\r\n# this func check if shot not in guesses(which is the list of all moves) \r\n# if yes, guess.append(shot), and break\r\n# return: the valid shot, guesses\r\n while True:\r\n try:\r\n if len(tactics) > 0:\r\n shot = tactics[0]\r\n else:\r\n shot = randrange(99)\r\n \r\n if shot not in guesses:\r\n guesses.append(shot)\r\n break\r\n except:\r\n print(\"incorrect - please enter integer only\")\r\n return shot, guesses\r\n\r\n\r\ndef calculate_tactics(shot, tactics, guesses, hit):\r\n# input: shot, tactics, guesses, hit\r\n# this function takes the newly shot, and changes the tactics list accordingly\r\n# the list temp is the possible positions that the next shot can be\r\n# if the shot hits the first time, len(tactics) = 0. Then, temp is the list contains 4 blocks around the shot\r\n# else, the list temp will be created based on the last 2 shots\r\n# candidate is the list of valid possible shots that is created from temp\r\n# shuffle the order of elements inside candidate\r\n# return: candidate (candidate is tactics)\r\n temp = []\r\n if len(tactics) < 1:\r\n # got 1 hit the first time \r\n temp = [shot - 1, shot + 1, shot - 10, shot + 10] # temporary places that the next shot could be \r\n else: \r\n # got at least 2 hits \r\n # checks to see if the 4 spots around is in hit\r\n if shot - 1 in hit: # east\r\n temp = [shot + 1]\r\n for num in [2, 3, 4, 5, 6, 7, 8]:\r\n if shot - num not in hit:\r\n temp.append(shot - num) \r\n break\r\n\r\n elif shot + 1 in hit: # west\r\n temp = [shot - 1]\r\n for num in [2, 3, 4, 5, 6, 7, 8]:\r\n if shot + num not in hit:\r\n temp.append(shot + num) \r\n break\r\n \r\n elif shot - 10 in hit: # south\r\n temp = [shot + 10]\r\n for num in [20, 30, 40, 50, 60, 70, 80]:\r\n if shot - num not in hit:\r\n temp.append(shot - num) \r\n break\r\n \r\n elif shot + 10 in hit: # north. Ex: first shot is 50, next shot is 40\r\n temp = [shot - 10]\r\n for num in [20, 30, 40, 50, 60, 70, 80]:\r\n if shot + num not in hit:\r\n temp.append(shot + num) \r\n break\r\n \r\n candidate = [] # list of valid places that the next shot could be\r\n for i in range(len(temp)):\r\n if temp[i] not in guesses and temp[i] < 100 and temp[i] > -1: #checks the validity of places in temp\r\n candidate.append(temp[i])\r\n random.shuffle(candidate) # shuffle the element order of the list candidate\r\n return candidate\r\n\r\n\r\n\r\n\"\"\"\r\nmain program:\r\n\"\"\"\r\nnum_boats = [5, 4, 3, 3, 2, 2] # this list contains all boats. 
Each boat is represented by its length \r\n\r\n# before game\r\n# computer - 1\r\nhit1 = []\r\nmiss1 = []\r\ncomp1 = []\r\nguesses1 = []\r\ncond1 = 0\r\ntactics1 = [] # list of possible moves after a boat is hitted. After a boat is sunked, tactics reset to []\r\ntaken_positions1 = []\r\nsinked_boats1 = []\r\n\r\n# user - 2\r\nhit2 = []\r\nmiss2 = []\r\ncomp2 = []\r\nguesses2 = []\r\ncond2 = 0\r\ntactics2 = []\r\ntaken_positions2 = []\r\nsinked_boats2 = []\r\n\r\n# computer creates ships for player 1\r\nships1, taken_positions1 = create_ships_c(taken_positions1, num_boats) \r\n# user creates boat for player 2 - show board\r\nships2, taken_positions2 = create_ships_u(taken_positions2, num_boats)\r\ncreate_playground_u(taken_positions2)\r\n\r\n# loop for user and computer takes turn to shoot, and repeat until finding a winner:\r\nturns = 0\r\nwhile True: \r\n turns += 1\r\n\r\n# USER SHOOTS: using 1 because it is checking the data of computer\r\n guesses1 = hit1 + miss1 + comp1\r\n shot1 = get_shot_user(guesses1)\r\n ships1, hit1, miss1, comp1, cond1, sinked_boats1 = check_shot(shot1, ships1, hit1, miss1, comp1, sinked_boats1)\r\n create_playground(hit1, miss1, comp1)\r\n\r\n# check if all of the computer ships are empty:\r\n if check_empty(ships1):\r\n print(\"end of game - winner in\", turns)\r\n break\r\n\r\n# COMPUTER SHOOTS:\r\n guesses2 = hit2 + miss2 + comp2\r\n shot2, guesses2 = get_shot_comp(guesses2, tactics2) \r\n ships2, hit2, miss2, comp2, cond2, sinked_boats2 = check_shot(shot2, ships2, hit2, miss2, comp2, sinked_boats2)\r\n create_playground(hit2, miss2, comp2)\r\n\r\n if cond2 == 1:\r\n # got 1 hit\r\n tactics2 = calculate_tactics(shot2, tactics2, guesses2, hit2)\r\n elif cond2 == 2:\r\n # comp, and sunk the boat\r\n # reset tactics = []\r\n tactics2 = []\r\n elif len(tactics2) > 0: #len(tactics) > 0 means that there are still possible moves\r\n # got 1 hit, then miss\r\n # remove the newly shot from tactics\r\n tactics2.pop(0)\r\n # in case all 3 statements above are False, which means there is no hit in the first place, tactics is still []\r\n\r\n# check if all of the computer ships are empty:\r\n if check_empty(ships2):\r\n print(\"end of game - computer wins in\", turns)\r\n break\r\n\r\n# after both the user and computer shoot, start a new loop:\r\n\r\n", "step-ids": [ 7, 10, 11, 12, 16 ] }
[ 7, 10, 11, 12, 16 ]
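A quick way to see what the check_ok placement test in the battleship listing above accepts and rejects is to call it on a few hand-picked boats; the placements below are invented for illustration and assume check_ok is already defined in the current scope.

# Illustrative calls only; assumes check_ok from the battleship program above is in scope.
# check_ok returns the sorted placement when it is legal and [-1] when it is rejected.
taken = [12, 13, 14]                    # squares already occupied by another boat
print(check_ok([25, 35, 45], taken))    # vertical boat, no conflict   -> [25, 35, 45]
print(check_ok([13, 14, 15], taken))    # overlaps an occupied square  -> [-1]
print(check_ok([8, 9, 10], []))         # wraps around the row edge    -> [-1]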
import os
import string
filenames = os.listdir('data/SENTIMENT_test')
filenames.sort()
outfile = open('sentiment_test.txt', 'w')
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
for filename in filenames:
    infile = open('data/SENTIMENT_test/' + filename, errors='ignore')
    infiletext = infile.read()
    infiletext = infiletext.replace('\n', ' ')
    infiletext = infiletext.translate(remove_punctuation_map)
    outfile.write(infiletext + '\n')
    infile.close()
outfile.close()
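The heart of this script is the str.translate call driven by a map built from string.punctuation; the snippet below is a small, self-contained illustration of that cleanup step on a made-up review line (the sample text is not from the dataset).

import string

# Same cleanup as the loop above, applied to one invented sample line.
remove_punctuation_map = dict((ord(char), None) for char in string.punctuation)
sample = "Great movie, really!\nWould watch again..."
cleaned = sample.replace('\n', ' ').translate(remove_punctuation_map)
print(cleaned)  # Great movie really Would watch again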
normal
{ "blob_id": "6434e427c9015544985a38104cffeaa10866b9ea", "index": 4585, "step-1": "<mask token>\n", "step-2": "<mask token>\nfilenames.sort()\n<mask token>\nfor filename in filenames:\n infile = open('data/SENTIMENT_test/' + filename, errors='ignore')\n infiletext = infile.read()\n infiletext = infiletext.replace('\\n', ' ')\n infiletext = infiletext.translate(remove_punctuation_map)\n outfile.write(infiletext + '\\n')\n infile.close()\noutfile.close()\n", "step-3": "<mask token>\nfilenames = os.listdir('data/SENTIMENT_test')\nfilenames.sort()\noutfile = open('sentiment_test.txt', 'w')\nremove_punctuation_map = dict((ord(char), None) for char in string.punctuation)\nfor filename in filenames:\n infile = open('data/SENTIMENT_test/' + filename, errors='ignore')\n infiletext = infile.read()\n infiletext = infiletext.replace('\\n', ' ')\n infiletext = infiletext.translate(remove_punctuation_map)\n outfile.write(infiletext + '\\n')\n infile.close()\noutfile.close()\n", "step-4": "import os\nimport string\nfilenames = os.listdir('data/SENTIMENT_test')\nfilenames.sort()\noutfile = open('sentiment_test.txt', 'w')\nremove_punctuation_map = dict((ord(char), None) for char in string.punctuation)\nfor filename in filenames:\n infile = open('data/SENTIMENT_test/' + filename, errors='ignore')\n infiletext = infile.read()\n infiletext = infiletext.replace('\\n', ' ')\n infiletext = infiletext.translate(remove_punctuation_map)\n outfile.write(infiletext + '\\n')\n infile.close()\noutfile.close()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from bs4 import BeautifulSoup
import re


class Rules:
    def __init__(self):
        self.ruleCollection = {
            "1": self.rule1,
            "2": self.rule2,
            "3": self.rule3,
            "4": self.rule4,
            "5": self.rule5,
            "6": self.rule6,
            "7": self.rule7,
            "8": self.rule8,
            "9": self.rule9,
            "10": self.rule10,
        }

    def getRule(self, id):
        return self.ruleCollection[id]

    # Image without text alternatives
    def rule1(self, dom):
        return dom.find_all(self._img_without_alt)

    # Embedded multimedia without noembed (text or audio)
    def rule2(self, dom):
        video_arr = []
        for embed in dom.find_all("embed"):
            if not embed.noembed:
                video_arr.append(embed)
        return video_arr

    # Color cues without the definitions in CSS
    # This rule needs to be improved
    def rule3(self, dom):
        clrcue_arr = []
        for fnt in dom.find_all('font'):
            if fnt.has_attr('color'):
                clrcue_arr.append(fnt)
        for spn in dom.find_all('span'):
            if spn.has_attr('style'):
                clrcue_arr.append(spn)
        return clrcue_arr

    # Table without summary
    def rule4(self, dom):
        return dom.find_all(self._tbl_without_summ)

    # Table without caption
    def rule5(self, dom):
        twcap_arr = []
        for tb in dom.find_all("table"):
            if not tb.caption:
                twcap_arr.append(tb)
        return twcap_arr

    # Form controls without an associated label
    def rule6(self, dom):
        lbl_arr = []
        inputElems = []
        inputElems.extend(dom.find_all(["textarea", "select"]))
        inputElems.extend(dom.find_all(type=["text", "password", "checkbox", "radio", "file"]))
        labels = dom.find_all('label')
        for input in inputElems:
            hasLabel = False
            if input.has_attr('id'):
                id = input['id']
                for lbl in labels:
                    if lbl.has_attr("for") and lbl['for'] == id:
                        hasLabel = True
                        break
            if not hasLabel:
                lbl_arr.append(input)
        return lbl_arr

    # Double-click handlers without a keyboard equivalent
    def rule7(self, dom):
        dblclk_arr = dom.find_all(ondblclick=True, onkeypress=False)
        return dblclk_arr

    # Document without a title element
    def rule8(self, dom):
        title_arr = []
        isTitle = dom.find('title')
        if isTitle is None:
            title_arr.append(dom.find('head'))
        return title_arr

    # Links whose content does not include a URL-like string
    def rule9(self, dom):
        link_arr = []
        url_tags = ['http', 'https', '://www.', 'www']
        for link in dom.find_all('a'):
            if not ('http' in link or 'https' in link or '://www.' in link or 'www' in link):
                link_arr.append(link)
        return link_arr

    # Scripted/interactive elements without an explicit tabindex
    def rule10(self, dom):
        tab_arr = []
        for tab in dom.find_all('a', 'input', ondblclick=True, onkeydown=True, onkeypress=True):
            if not tab.has_attr('tabindex'):
                tab_arr.append(tab)
        return tab_arr

    def _img_without_alt(self, tag):
        return tag.name == "img" and not tag.has_attr("alt")

    def _tbl_without_summ(self, tag):
        return tag.name == "table" and not tag.has_attr("summary")

# for testing
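Because every check is registered in ruleCollection under a numeric key, a caller can iterate over the rules generically; the sketch below shows one possible way to drive the class, where the test HTML and the report format are assumptions rather than part of the original module.

# Hypothetical usage of the Rules class above; the HTML here is an invented test page.
from bs4 import BeautifulSoup

html = """
<html><head></head><body>
  <img src="logo.png">
  <table><tr><td>data</td></tr></table>
  <input type="text" id="name">
</body></html>
"""
dom = BeautifulSoup(html, 'html.parser')
rules = Rules()
for rule_id in map(str, range(1, 11)):
    violations = rules.getRule(rule_id)(dom)
    print(rule_id, len(violations), 'potential issue(s)')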
normal
{ "blob_id": "7747cbb1a1ed191b616b0d1bcfd51cdea05067f5", "index": 5954, "step-1": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n <mask token>\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n <mask token>\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n <mask token>\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n <mask token>\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n <mask token>\n <mask token>\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n", "step-2": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n\n def getRule(self, id):\n return self.ruleCollection[id]\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n <mask token>\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n <mask token>\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n\n def rule7(self, dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick=True, onkeypress=False)\n return dblclk_arr\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n\n def rule9(self, dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.', 'www']\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' 
in link or\n 'www' in link):\n link_arr.append(link)\n return link_arr\n <mask token>\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n", "step-3": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n\n def getRule(self, id):\n return self.ruleCollection[id]\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n\n def rule2(self, dom):\n video_arr = []\n for embed in dom.find_all('embed'):\n if not embed.noembed:\n video_arr.append(embed)\n return video_arr\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n\n def rule5(self, dom):\n twcap_arr = []\n for tb in dom.find_all('table'):\n if not tb.caption:\n twcap_arr.append(tb)\n return twcap_arr\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n\n def rule7(self, dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick=True, onkeypress=False)\n return dblclk_arr\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n\n def rule9(self, dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.', 'www']\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' 
in link or\n 'www' in link):\n link_arr.append(link)\n return link_arr\n <mask token>\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n", "step-4": "<mask token>\n\n\nclass Rules:\n\n def __init__(self):\n self.ruleCollection = {'1': self.rule1, '2': self.rule2, '3': self.\n rule3, '4': self.rule4, '5': self.rule5, '6': self.rule6, '7':\n self.rule7, '8': self.rule8, '9': self.rule9, '10': self.rule10}\n\n def getRule(self, id):\n return self.ruleCollection[id]\n\n def rule1(self, dom):\n return dom.find_all(self._img_without_alt)\n\n def rule2(self, dom):\n video_arr = []\n for embed in dom.find_all('embed'):\n if not embed.noembed:\n video_arr.append(embed)\n return video_arr\n\n def rule3(self, dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n <mask token>\n\n def rule5(self, dom):\n twcap_arr = []\n for tb in dom.find_all('table'):\n if not tb.caption:\n twcap_arr.append(tb)\n return twcap_arr\n\n def rule6(self, dom):\n lbl_arr = []\n inputElems = []\n inputElems.extend(dom.find_all(['textarea', 'select']))\n inputElems.extend(dom.find_all(type=['text', 'password', 'checkbox',\n 'radio', 'file']))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n for lbl in labels:\n if lbl.has_attr('for') and lbl['for'] == id:\n hasLabel = True\n break\n if not hasLabel:\n lbl_arr.append(input)\n return lbl_arr\n\n def rule7(self, dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick=True, onkeypress=False)\n return dblclk_arr\n\n def rule8(self, dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n\n def rule9(self, dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.', 'www']\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' 
in link or\n 'www' in link):\n link_arr.append(link)\n return link_arr\n\n def rule10(self, dom):\n tab_arr = []\n for tab in dom.find_all('a', 'input', ondblclick=True, onkeydown=\n True, onkeypress=True):\n if not tab.has_attr('tabindex'):\n tab_arr.append(tab)\n return tab_arr\n\n def _img_without_alt(self, tag):\n return tag.name == 'img' and not tag.has_attr('alt')\n\n def _tbl_without_summ(self, tag):\n return tag.name == 'table' and not tag.has_attr('summary')\n", "step-5": "from bs4 import BeautifulSoup\nimport re\n\nclass Rules:\n def __init__(self):\n self.ruleCollection = {\n \"1\" : self.rule1,\n \"2\" : self.rule2,\n \"3\" : self.rule3,\n \"4\" : self.rule4,\n \"5\" : self.rule5,\n \"6\" : self.rule6,\n \"7\" : self.rule7,\n \"8\" : self.rule8,\n \"9\" : self.rule9,\n \"10\" : self.rule10,\n }\n \n def getRule(self, id):\n return self.ruleCollection[id]\n # Image without text alternatives\n def rule1(self,dom):\n return dom.find_all(self._img_without_alt)\n \n # Embeded multimedia without noembed (text or audio)\n def rule2(self,dom):\n video_arr = []\n for embed in dom.find_all(\"embed\"):\n if not embed.noembed:\n video_arr.append(embed)\n return video_arr\n \n #color cues\n #without the definitions in css\n #This rule needs to be improved\n def rule3(self,dom):\n clrcue_arr = []\n for fnt in dom.find_all('font'):\n if fnt.has_attr('color'):\n clrcue_arr.append(fnt)\n for spn in dom.find_all('span'):\n if spn.has_attr('style'):\n clrcue_arr.append(spn)\n return clrcue_arr\n \n #Table without summary\n def rule4(self,dom):\n return dom.find_all(self._tbl_without_summ)\n \n \n #Table without caption\n def rule5(self,dom):\n twcap_arr = [];\n for tb in dom.find_all(\"table\"):\n if not tb.caption:\n twcap_arr.append(tb)\n return twcap_arr\n \n def rule6(self,dom):\n lbl_arr = [];\n inputElems =[]\n inputElems.extend(dom.find_all([\"textarea\", \"select\"]))\n inputElems.extend(dom.find_all(type=[\"text\",\"password\", \"checkbox\", \"radio\", \"file\"]))\n labels = dom.find_all('label')\n for input in inputElems:\n hasLabel = False\n if input.has_attr('id'):\n id = input['id']\n \n for lbl in labels:\n if lbl.has_attr(\"for\") and lbl['for'] == id:\n hasLabel = True\n break\n \n if not hasLabel:\n lbl_arr.append(input)\n\n return lbl_arr\n \n def rule7(self,dom):\n dblclk_arr = []\n dblclk_arr = dom.find_all(ondblclick = True, onkeypress = False)\n return dblclk_arr\n \n def rule8(self,dom):\n title_arr = []\n isTitle = dom.find('title')\n if isTitle is None:\n title_arr.append(dom.find('head'))\n return title_arr\n \n def rule9(self,dom):\n link_arr = []\n url_tags = ['http', 'https', '://www.' , 'www' ]\n for link in dom.find_all('a'):\n if not ('http' in link or 'https' in link or '://www.' in link or 'www' in link):\n link_arr.append(link)\n \n return link_arr\n \n def rule10(self,dom):\n tab_arr = []\n for tab in dom.find_all('a', 'input', ondblclick = True, onkeydown = True, onkeypress = True):\n if not tab.has_attr('tabindex'):\n tab_arr.append(tab)\n \n return tab_arr \n \n def _img_without_alt(self,tag):\n return tag.name == \"img\" and not tag.has_attr(\"alt\")\n \n def _tbl_without_summ(self,tag):\n return tag.name == \"table\" and not tag.has_attr(\"summary\")\n \n#for testing\n\n\n\n", "step-ids": [ 8, 11, 13, 14, 17 ] }
[ 8, 11, 13, 14, 17 ]
print('-' * 60)
print('Welcome to CLUB425, the most lit club in downtown ACTvF. Before you can enter, I need you yo answer some question...')
print()
age = input('What is your age today? ')
age = int(age)
if age >= 21:
    print('Cool, come on in.')
else:
    print('Your gonna need to back up. This club is 21+ only so find somewhere else to party or find out what robot punches feel like. ')
    print('Anyway...have a good day! ')
print('-' * 60)
normal
{ "blob_id": "19ffac718008c7c9279fb8cbc7608597d2d3e708", "index": 3937, "step-1": "<mask token>\n", "step-2": "print('-' * 60)\nprint(\n 'Welcome to CLUB425, the most lit club in downtown ACTvF. Before you can enter, I need you yo answer some question...'\n )\nprint()\n<mask token>\nif age >= 21:\n print('Cool, come on in.')\nelse:\n print(\n 'Your gonna need to back up. This club is 21+ only so find somewhere else to party or find out what robot punches feel like. '\n )\n print('Anyway...have a good day! ')\nprint('-' * 60)\n", "step-3": "print('-' * 60)\nprint(\n 'Welcome to CLUB425, the most lit club in downtown ACTvF. Before you can enter, I need you yo answer some question...'\n )\nprint()\nage = input('What is your age today? ')\nage = int(age)\nif age >= 21:\n print('Cool, come on in.')\nelse:\n print(\n 'Your gonna need to back up. This club is 21+ only so find somewhere else to party or find out what robot punches feel like. '\n )\n print('Anyway...have a good day! ')\nprint('-' * 60)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import random


def Fun_hiraganas():
    hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko',
                 'sa', 'shi', 'su', 'se', 'so', 'ta', 'chi', 'tsu', 'te', 'to',
                 'na', 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']
    print("escriba el hiragana", hiraganas[random.randint(0, len(hiraganas)-1)])


print("Hello, type exit if you want to leave")
answer = ""
while answer.lower() != 'exit':
    Fun_hiraganas()
    answer = input("Type exit if you want to leave")
print("bye")
normal
{ "blob_id": "1fe7d5db1b47ba082301d07d010c6796fbd7edb7", "index": 6859, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef Fun_hiraganas():\n hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko',\n 'sa', 'shi', 'su', 'se', 'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na',\n 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n print('escriba el hiragana', hiraganas[random.randint(0, len(hiraganas) -\n 1)])\n\n\nprint('Hello, type exit if you want to leave')\n<mask token>\nwhile answer.lower() != 'exit':\n Fun_hiraganas()\n answer = input('Type exit if you want to leave')\nprint('bye')\n", "step-3": "<mask token>\n\n\ndef Fun_hiraganas():\n hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko',\n 'sa', 'shi', 'su', 'se', 'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na',\n 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n print('escriba el hiragana', hiraganas[random.randint(0, len(hiraganas) -\n 1)])\n\n\nprint('Hello, type exit if you want to leave')\nanswer = ''\nwhile answer.lower() != 'exit':\n Fun_hiraganas()\n answer = input('Type exit if you want to leave')\nprint('bye')\n", "step-4": "import random\n\n\ndef Fun_hiraganas():\n hiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko',\n 'sa', 'shi', 'su', 'se', 'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na',\n 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n print('escriba el hiragana', hiraganas[random.randint(0, len(hiraganas) -\n 1)])\n\n\nprint('Hello, type exit if you want to leave')\nanswer = ''\nwhile answer.lower() != 'exit':\n Fun_hiraganas()\n answer = input('Type exit if you want to leave')\nprint('bye')\n", "step-5": "import random\n\ndef Fun_hiraganas():\n\thiraganas = ['a', 'i', 'u', 'e', 'o', 'ka', 'ki', 'ku', 'ke', 'ko', 'sa', 'shi', 'su', 'se', \n\t'so', 'ta', 'chi', 'tsu', 'te', 'to', 'na', 'ni', 'nu', 'ne', 'no', 'ha', 'hi', 'fu', 'he', 'ho']\n\tprint(\"escriba el hiragana\", hiraganas[random.randint(0, len(hiraganas)-1)])\n\nprint(\"Hello, type exit if you want to leave\")\nanswer = \"\"\nwhile answer.lower() != 'exit':\n\tFun_hiraganas() \n\tanswer = input(\"Type exit if you want to leave\")\nprint(\"bye\")\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
from django.contrib.auth.decorators import login_required
from django.shortcuts import render

from orders.models import Setting


def search(request):
    return render(request, 'ui/search.html')


def search_printed(request):
    print_url = ''
    setting = Setting.objects.filter(name='printer').first()
    if setting != None:
        print_url = setting.value
    return render(request, 'ui/search.html', {'print_url': print_url})


@login_required
def queue(request):
    print_url = ''
    setting = Setting.objects.filter(name='printer_admin').first()
    if setting != None:
        print_url = setting.value
    return render(request, 'ui/queue.html', {'print_url': print_url, 'footer': True})


def queue_tablet(request):
    print_url = ''
    setting = Setting.objects.filter(name='printer_admin').first()
    if setting != None:
        print_url = setting.value
    return render(request, 'ui/queue.html', {'print_url': print_url, 'footer': False})
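These views only become reachable once a URLconf points at them; the mapping below is one plausible wiring, and every route string and route name in it is an assumption, not something defined by the app itself.

# Hypothetical urls.py for the views above; the paths and names are illustrative guesses.
from django.urls import path

from . import views

urlpatterns = [
    path('search/', views.search, name='search'),
    path('search/printed/', views.search_printed, name='search_printed'),
    path('queue/', views.queue, name='queue'),
    path('queue/tablet/', views.queue_tablet, name='queue_tablet'),
]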
normal
{ "blob_id": "f16d43d9dfb3e9b9589fa92eb82aaa4c73fe48cd", "index": 1264, "step-1": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n<mask token>\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n", "step-3": "<mask token>\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n@login_required\ndef queue(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': True})\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n", "step-4": "from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render\nfrom orders.models import Setting\n\n\ndef search(request):\n return render(request, 'ui/search.html')\n\n\ndef search_printed(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/search.html', {'print_url': print_url})\n\n\n@login_required\ndef queue(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': True})\n\n\ndef queue_tablet(request):\n print_url = ''\n setting = Setting.objects.filter(name='printer_admin').first()\n if setting != None:\n print_url = setting.value\n return render(request, 'ui/queue.html', {'print_url': print_url,\n 'footer': False})\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
#Eyal Reis - 203249354

from view import View


def main():
    """
    primary game method
    """
    view = View()
    view.root.mainloop()


if __name__ == "__main__":
    main()
normal
{ "blob_id": "640eae824e43e394bf0624dd4cf7dcec78f43604", "index": 4947, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from view import View\n\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#Eyal Reis - 203249354\n\nfrom view import View\n\ndef main():\n \"\"\"\n primary game method\n \"\"\"\n view = View()\n view.root.mainloop()\n \nif __name__ == \"__main__\":\n main()\n ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 4 12:14:16 2020

@author: mdevasish
"""

import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split
import joblib
import seaborn as sns
import matplotlib.pyplot as plt


class model_construction:

    def __init__(self, data, model, fit_intercept=True, alpha=1.0, max_iter=1000, solver='auto'):
        '''
        Constructor to set the values before creating the model

        Input Parameters :

            data      : Input DataFrame
            model     : Model to be implemented
            alpha     : Regularization constant applicable for Ridge and Lasso
            max_iter  : Maximimum iterations applicable for Lasso
            solver    : Type of solver to use applicable for Ridge

        '''
        self.data = data
        self.alpha = alpha
        self.max_iter = max_iter
        self.solver = solver
        self.fit_intercept = fit_intercept
        if model == 'LinearRegression':
            self.model = LinearRegression(fit_intercept=self.fit_intercept)
        elif model == 'Lasso':
            self.model = Lasso(alpha=self.alpha, max_iter=self.max_iter, fit_intercept=self.fit_intercept)
        elif model == 'Ridge':
            self.model = Ridge(alpha=self.alpha, solver=self.solver, fit_intercept=self.fit_intercept)
        else:
            raise Exception('Wrong input model')

    def implement_model(self, filename):
        '''
        Method inside the model_construction class, used for implementing the model
        and return feature importance and dataframe with actual values and predicted values of validation set

        Input :
            tsize      : size of the dataset for the validation default value 0.3
            random_val : Seed for randomness for reproducibility default value 2020

        Returns :
            fimp : Feature importance of a model
            diag : diagnostic dataframe with actual values and predicted values of validation set
        '''
        df = self.data
        model = self.model

        X, y = df.iloc[:, :-1], df.iloc[:, -1]
        X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.3, random_state=2020)

        model.fit(X_train, y_train)

        print('R square score on train set and test set are :', model.score(X_train, y_train), model.score(X_val, y_val))
        print('Root mean squared error on test set is :', np.sqrt(mean_squared_error(y_val, model.predict(X_val))))
        print('Mean absolute error on test set is :', mean_absolute_error(y_val, model.predict(X_val)))

        fimp = pd.DataFrame(zip(X.columns, model.coef_), columns=['feat', 'coeff']).sort_values(by='coeff', ascending=False)
        fimp['abs_coeff'] = fimp['coeff'].apply(lambda x: x if x > 0 else -x)
        fimp['rel'] = fimp['coeff'].apply(lambda x: 'pos' if x > 0 else 'neg')
        fimp['rel'] = fimp['rel'].astype('category')
        fimp = fimp.sort_values(by='abs_coeff', ascending=False)

        pred = model.predict(X_val)
        diag = pd.DataFrame(zip(y_val, pred), columns=['Ground Truth', 'Predicted'])

        full_name = './Models/' + filename + '.sav'
        joblib.dump(model, full_name)
        return fimp, diag

    def plot_feat_imp(self, fimp, title):
        '''
        Method inside the model_construction class, used for creating a feature importance plot

        Input :
            fimp  : Dataframe with feature importance
            title : Title of the plot

        Displays a plot
        '''
        plt.figure(figsize=(18, 12))
        sns.barplot(y='feat', x='abs_coeff', hue='rel', data=fimp)
        plt.title('Feature Importance plot for ' + title)

    def plot_diagnostic(self, diag):
        '''
        Method inside the model_construction class, used for creating a diagnostic plot ground truth vs predicted

        Input :
            diag : Dataframe with feature importance

        Displays a plot
        '''
        plt.figure(figsize=(18, 9))
        g = sns.scatterplot(x='Ground Truth', y='Predicted', data=diag)
        plt.title('Ground Truth vs Predicted on validation Data')
        plt.show()
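Since implement_model expects the target in the last column of the DataFrame and persists the fitted estimator under ./Models/, a minimal end-to-end run could look like the sketch below; the synthetic data, the Ridge hyperparameters, and the output filename are all assumptions made only for illustration.

import numpy as np
import pandas as pd

# Minimal sketch, assuming a ./Models directory already exists for joblib.dump.
rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(200, 3)), columns=['f1', 'f2', 'f3'])
y = 2 * X['f1'] - 0.5 * X['f3'] + rng.normal(scale=0.1, size=200)
df = pd.concat([X, y.rename('target')], axis=1)   # target must be the last column

mc = model_construction(df, 'Ridge', alpha=0.5)
fimp, diag = mc.implement_model('ridge_demo')     # saves ./Models/ridge_demo.sav
mc.plot_feat_imp(fimp, 'Ridge demo')
mc.plot_diagnostic(diag)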
normal
{ "blob_id": "f07b95a3b18aecf6cadaa8398c9158a7cd10aeeb", "index": 7101, "step-1": "<mask token>\n\n\nclass model_construction:\n <mask token>\n\n def implement_model(self, filename):\n \"\"\"\n Method inside the model_construction class, used for implementing the model\n and return feature importance and dataframe with actual values and predicted values of validation set\n \n Input :\n tsize : size of the dataset for the validation default value 0.3\n random_val : Seed for randomness for reproducibility default value 2020\n \n Returns :\n fimp : Feature importance of a model\n diag : diagnostic dataframe with actual values and predicted values of validation set\n \"\"\"\n df = self.data\n model = self.model\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=\n 0.3, random_state=2020)\n model.fit(X_train, y_train)\n print('R square score on train set and test set are :', model.score\n (X_train, y_train), model.score(X_val, y_val))\n print('Root mean squared error on test set is :', np.sqrt(\n mean_squared_error(y_val, model.predict(X_val))))\n print('Mean absolute error on test set is :', mean_absolute_error(\n y_val, model.predict(X_val)))\n fimp = pd.DataFrame(zip(X.columns, model.coef_), columns=['feat',\n 'coeff']).sort_values(by='coeff', ascending=False)\n fimp['abs_coeff'] = fimp['coeff'].apply(lambda x: x if x > 0 else -x)\n fimp['rel'] = fimp['coeff'].apply(lambda x: 'pos' if x > 0 else 'neg')\n fimp['rel'] = fimp['rel'].astype('category')\n fimp = fimp.sort_values(by='abs_coeff', ascending=False)\n pred = model.predict(X_val)\n diag = pd.DataFrame(zip(y_val, pred), columns=['Ground Truth',\n 'Predicted'])\n full_name = './Models/' + filename + '.sav'\n joblib.dump(model, full_name)\n return fimp, diag\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass model_construction:\n\n def __init__(self, data, model, fit_intercept=True, alpha=1.0, max_iter\n =1000, solver='auto'):\n \"\"\"\n Constructor to set the values before creating the model\n \n Input Parameters :\n \n data : Input DataFrame\n model : Model to be implemented\n alpha : Regularization constant applicable for Ridge and Lasso\n max_iter : Maximimum iterations applicable for Lasso\n solver : Type of solver to use applicable for Ridge\n \n \"\"\"\n self.data = data\n self.alpha = alpha\n self.max_iter = max_iter\n self.solver = solver\n self.fit_intercept = fit_intercept\n if model == 'LinearRegression':\n self.model = LinearRegression(fit_intercept=self.fit_intercept)\n elif model == 'Lasso':\n self.model = Lasso(alpha=self.alpha, max_iter=self.max_iter,\n fit_intercept=self.fit_intercept)\n elif model == 'Ridge':\n self.model = Ridge(alpha=self.alpha, solver=self.solver,\n fit_intercept=self.fit_intercept)\n else:\n raise Exception('Wrong input model')\n\n def implement_model(self, filename):\n \"\"\"\n Method inside the model_construction class, used for implementing the model\n and return feature importance and dataframe with actual values and predicted values of validation set\n \n Input :\n tsize : size of the dataset for the validation default value 0.3\n random_val : Seed for randomness for reproducibility default value 2020\n \n Returns :\n fimp : Feature importance of a model\n diag : diagnostic dataframe with actual values and predicted values of validation set\n \"\"\"\n df = self.data\n model = self.model\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=\n 0.3, 
random_state=2020)\n model.fit(X_train, y_train)\n print('R square score on train set and test set are :', model.score\n (X_train, y_train), model.score(X_val, y_val))\n print('Root mean squared error on test set is :', np.sqrt(\n mean_squared_error(y_val, model.predict(X_val))))\n print('Mean absolute error on test set is :', mean_absolute_error(\n y_val, model.predict(X_val)))\n fimp = pd.DataFrame(zip(X.columns, model.coef_), columns=['feat',\n 'coeff']).sort_values(by='coeff', ascending=False)\n fimp['abs_coeff'] = fimp['coeff'].apply(lambda x: x if x > 0 else -x)\n fimp['rel'] = fimp['coeff'].apply(lambda x: 'pos' if x > 0 else 'neg')\n fimp['rel'] = fimp['rel'].astype('category')\n fimp = fimp.sort_values(by='abs_coeff', ascending=False)\n pred = model.predict(X_val)\n diag = pd.DataFrame(zip(y_val, pred), columns=['Ground Truth',\n 'Predicted'])\n full_name = './Models/' + filename + '.sav'\n joblib.dump(model, full_name)\n return fimp, diag\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass model_construction:\n\n def __init__(self, data, model, fit_intercept=True, alpha=1.0, max_iter\n =1000, solver='auto'):\n \"\"\"\n Constructor to set the values before creating the model\n \n Input Parameters :\n \n data : Input DataFrame\n model : Model to be implemented\n alpha : Regularization constant applicable for Ridge and Lasso\n max_iter : Maximimum iterations applicable for Lasso\n solver : Type of solver to use applicable for Ridge\n \n \"\"\"\n self.data = data\n self.alpha = alpha\n self.max_iter = max_iter\n self.solver = solver\n self.fit_intercept = fit_intercept\n if model == 'LinearRegression':\n self.model = LinearRegression(fit_intercept=self.fit_intercept)\n elif model == 'Lasso':\n self.model = Lasso(alpha=self.alpha, max_iter=self.max_iter,\n fit_intercept=self.fit_intercept)\n elif model == 'Ridge':\n self.model = Ridge(alpha=self.alpha, solver=self.solver,\n fit_intercept=self.fit_intercept)\n else:\n raise Exception('Wrong input model')\n\n def implement_model(self, filename):\n \"\"\"\n Method inside the model_construction class, used for implementing the model\n and return feature importance and dataframe with actual values and predicted values of validation set\n \n Input :\n tsize : size of the dataset for the validation default value 0.3\n random_val : Seed for randomness for reproducibility default value 2020\n \n Returns :\n fimp : Feature importance of a model\n diag : diagnostic dataframe with actual values and predicted values of validation set\n \"\"\"\n df = self.data\n model = self.model\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=\n 0.3, random_state=2020)\n model.fit(X_train, y_train)\n print('R square score on train set and test set are :', model.score\n (X_train, y_train), model.score(X_val, y_val))\n print('Root mean squared error on test set is :', np.sqrt(\n mean_squared_error(y_val, model.predict(X_val))))\n print('Mean absolute error on test set is :', mean_absolute_error(\n y_val, model.predict(X_val)))\n fimp = pd.DataFrame(zip(X.columns, model.coef_), columns=['feat',\n 'coeff']).sort_values(by='coeff', ascending=False)\n fimp['abs_coeff'] = fimp['coeff'].apply(lambda x: x if x > 0 else -x)\n fimp['rel'] = fimp['coeff'].apply(lambda x: 'pos' if x > 0 else 'neg')\n fimp['rel'] = fimp['rel'].astype('category')\n fimp = fimp.sort_values(by='abs_coeff', ascending=False)\n pred = model.predict(X_val)\n diag = pd.DataFrame(zip(y_val, pred), columns=['Ground Truth',\n 
'Predicted'])\n full_name = './Models/' + filename + '.sav'\n joblib.dump(model, full_name)\n return fimp, diag\n\n def plot_feat_imp(self, fimp, title):\n \"\"\"\n Method inside the model_construction class, used for creating a feature importance plot\n \n Input :\n fimp : Dataframe with feature importance\n title : Title of the plot\n \n Displays a plot\n \"\"\"\n plt.figure(figsize=(18, 12))\n sns.barplot(y='feat', x='abs_coeff', hue='rel', data=fimp)\n plt.title('Feature Importance plot for ' + title)\n <mask token>\n", "step-4": "<mask token>\n\n\nclass model_construction:\n\n def __init__(self, data, model, fit_intercept=True, alpha=1.0, max_iter\n =1000, solver='auto'):\n \"\"\"\n Constructor to set the values before creating the model\n \n Input Parameters :\n \n data : Input DataFrame\n model : Model to be implemented\n alpha : Regularization constant applicable for Ridge and Lasso\n max_iter : Maximimum iterations applicable for Lasso\n solver : Type of solver to use applicable for Ridge\n \n \"\"\"\n self.data = data\n self.alpha = alpha\n self.max_iter = max_iter\n self.solver = solver\n self.fit_intercept = fit_intercept\n if model == 'LinearRegression':\n self.model = LinearRegression(fit_intercept=self.fit_intercept)\n elif model == 'Lasso':\n self.model = Lasso(alpha=self.alpha, max_iter=self.max_iter,\n fit_intercept=self.fit_intercept)\n elif model == 'Ridge':\n self.model = Ridge(alpha=self.alpha, solver=self.solver,\n fit_intercept=self.fit_intercept)\n else:\n raise Exception('Wrong input model')\n\n def implement_model(self, filename):\n \"\"\"\n Method inside the model_construction class, used for implementing the model\n and return feature importance and dataframe with actual values and predicted values of validation set\n \n Input :\n tsize : size of the dataset for the validation default value 0.3\n random_val : Seed for randomness for reproducibility default value 2020\n \n Returns :\n fimp : Feature importance of a model\n diag : diagnostic dataframe with actual values and predicted values of validation set\n \"\"\"\n df = self.data\n model = self.model\n X, y = df.iloc[:, :-1], df.iloc[:, -1]\n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=\n 0.3, random_state=2020)\n model.fit(X_train, y_train)\n print('R square score on train set and test set are :', model.score\n (X_train, y_train), model.score(X_val, y_val))\n print('Root mean squared error on test set is :', np.sqrt(\n mean_squared_error(y_val, model.predict(X_val))))\n print('Mean absolute error on test set is :', mean_absolute_error(\n y_val, model.predict(X_val)))\n fimp = pd.DataFrame(zip(X.columns, model.coef_), columns=['feat',\n 'coeff']).sort_values(by='coeff', ascending=False)\n fimp['abs_coeff'] = fimp['coeff'].apply(lambda x: x if x > 0 else -x)\n fimp['rel'] = fimp['coeff'].apply(lambda x: 'pos' if x > 0 else 'neg')\n fimp['rel'] = fimp['rel'].astype('category')\n fimp = fimp.sort_values(by='abs_coeff', ascending=False)\n pred = model.predict(X_val)\n diag = pd.DataFrame(zip(y_val, pred), columns=['Ground Truth',\n 'Predicted'])\n full_name = './Models/' + filename + '.sav'\n joblib.dump(model, full_name)\n return fimp, diag\n\n def plot_feat_imp(self, fimp, title):\n \"\"\"\n Method inside the model_construction class, used for creating a feature importance plot\n \n Input :\n fimp : Dataframe with feature importance\n title : Title of the plot\n \n Displays a plot\n \"\"\"\n plt.figure(figsize=(18, 12))\n sns.barplot(y='feat', x='abs_coeff', hue='rel', data=fimp)\n 
plt.title('Feature Importance plot for ' + title)\n\n def plot_diagnostic(self, diag):\n \"\"\"\n Method inside the model_construction class, used for creating a diagnostic plot ground truth vs predicted\n \n Input :\n diag : Dataframe with feature importance\n \n Displays a plot\n \"\"\"\n plt.figure(figsize=(18, 9))\n g = sns.scatterplot(x='Ground Truth', y='Predicted', data=diag)\n plt.title('Ground Truth vs Predicted on validation Data')\n plt.show()\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 4 12:14:16 2020\n\n@author: mdevasish\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression,Lasso,Ridge\nfrom sklearn.metrics import mean_squared_error,mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nimport joblib\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\nclass model_construction:\n \n def __init__(self,data,model,fit_intercept = True,alpha = 1.0, max_iter = 1000, solver = 'auto'):\n '''\n Constructor to set the values before creating the model\n \n Input Parameters :\n \n data : Input DataFrame\n model : Model to be implemented\n alpha : Regularization constant applicable for Ridge and Lasso\n max_iter : Maximimum iterations applicable for Lasso\n solver : Type of solver to use applicable for Ridge\n \n '''\n self.data = data\n self.alpha = alpha\n self.max_iter = max_iter\n self.solver = solver\n self.fit_intercept = fit_intercept\n if model == 'LinearRegression':\n self.model = LinearRegression(fit_intercept = self.fit_intercept)\n elif model == 'Lasso':\n self.model = Lasso(alpha = self.alpha,max_iter = self.max_iter,fit_intercept = self.fit_intercept)\n elif model == 'Ridge':\n self.model = Ridge(alpha = self.alpha,solver = self.solver,fit_intercept = self.fit_intercept)\n else:\n raise Exception('Wrong input model')\n \n def implement_model(self,filename):\n '''\n Method inside the model_construction class, used for implementing the model\n and return feature importance and dataframe with actual values and predicted values of validation set\n \n Input :\n tsize : size of the dataset for the validation default value 0.3\n random_val : Seed for randomness for reproducibility default value 2020\n \n Returns :\n fimp : Feature importance of a model\n diag : diagnostic dataframe with actual values and predicted values of validation set\n '''\n df = self.data\n model = self.model\n \n \n X,y = df.iloc[:,:-1],df.iloc[:,-1]\n X_train,X_val,y_train,y_val = train_test_split(X,y,test_size = 0.3,random_state = 2020)\n \n model.fit(X_train,y_train)\n \n print('R square score on train set and test set are :',model.score(X_train,y_train),model.score(X_val,y_val))\n print('Root mean squared error on test set is :',np.sqrt(mean_squared_error(y_val,model.predict(X_val))))\n print('Mean absolute error on test set is :',mean_absolute_error(y_val,model.predict(X_val)))\n \n fimp = pd.DataFrame(zip(X.columns,model.coef_),columns = ['feat','coeff']).sort_values(by = 'coeff',ascending = False)\n fimp['abs_coeff'] = fimp['coeff'].apply(lambda x : x if x > 0 else -x)\n fimp['rel'] = fimp['coeff'].apply(lambda x : 'pos' if x > 0 else 'neg')\n fimp['rel'] = fimp['rel'].astype('category')\n fimp = fimp.sort_values(by = 'abs_coeff',ascending = False)\n \n pred = model.predict(X_val)\n diag = pd.DataFrame(zip(y_val,pred),columns = ['Ground Truth','Predicted'])\n \n full_name = './Models/'+filename+'.sav'\n joblib.dump(model, full_name)\n return fimp,diag\n\n def plot_feat_imp(self,fimp,title):\n '''\n 
Method inside the model_construction class, used for creating a feature importance plot\n \n Input :\n fimp : Dataframe with feature importance\n title : Title of the plot\n \n Displays a plot\n '''\n plt.figure(figsize = (18,12))\n sns.barplot(y = 'feat', x = 'abs_coeff', hue = 'rel',data = fimp)\n plt.title('Feature Importance plot for '+title)\n \n def plot_diagnostic(self,diag):\n '''\n Method inside the model_construction class, used for creating a diagnostic plot ground truth vs predicted\n \n Input :\n diag : Dataframe with feature importance\n \n Displays a plot\n '''\n \n plt.figure(figsize = (18,9))\n g = sns.scatterplot(x = 'Ground Truth', y = 'Predicted',data = diag)\n plt.title('Ground Truth vs Predicted on validation Data')\n plt.show()\n", "step-ids": [ 2, 3, 4, 5, 7 ] }
[ 2, 3, 4, 5, 7 ]
# Generated by Django 3.2.2 on 2021-05-11 09:49 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('meeting', '0004_auto_20210511_0947'), ] operations = [ migrations.AlterField( model_name='event', name='end', field=models.DateTimeField(auto_now_add=True), ), migrations.AlterField( model_name='event', name='start', field=models.DateTimeField(auto_now_add=True), ), ]
normal
{ "blob_id": "1c1cd0eeea4dbf446aa4582f42ef1f3b5a4e8875", "index": 7452, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('meeting', '0004_auto_20210511_0947')]\n operations = [migrations.AlterField(model_name='event', name='end',\n field=models.DateTimeField(auto_now_add=True)), migrations.\n AlterField(model_name='event', name='start', field=models.\n DateTimeField(auto_now_add=True))]\n", "step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('meeting', '0004_auto_20210511_0947')]\n operations = [migrations.AlterField(model_name='event', name='end',\n field=models.DateTimeField(auto_now_add=True)), migrations.\n AlterField(model_name='event', name='start', field=models.\n DateTimeField(auto_now_add=True))]\n", "step-5": "# Generated by Django 3.2.2 on 2021-05-11 09:49\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('meeting', '0004_auto_20210511_0947'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='event',\n name='end',\n field=models.DateTimeField(auto_now_add=True),\n ),\n migrations.AlterField(\n model_name='event',\n name='start',\n field=models.DateTimeField(auto_now_add=True),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Pass Function

def hello_func():
    pass


hello_func()
print(hello_func())


def hello_func():
    hello_func()


print(hello_func)

# A function allows code to be reused without repetition


def hello_func():
    print('hello function!')
    hello_func()
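normal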
normal
{ "blob_id": "94a0b341aac3683712578b31e98a0a5a6a643b57", "index": 7646, "step-1": "def hello_func():\n pass\n\n\n<mask token>\n", "step-2": "def hello_func():\n pass\n\n\n<mask token>\n\n\ndef hello_func():\n print('hello function!')\n hello_func()\n", "step-3": "def hello_func():\n pass\n\n\n<mask token>\n\n\ndef hello_func():\n hello_func()\n\n\n<mask token>\n\n\ndef hello_func():\n print('hello function!')\n hello_func()\n", "step-4": "def hello_func():\n pass\n\n\nhello_func()\nprint(hello_func())\n\n\ndef hello_func():\n hello_func()\n\n\nprint(hello_func)\n\n\ndef hello_func():\n print('hello function!')\n hello_func()\n", "step-5": "# Pass Function\n\ndef hello_func():\n pass\n\n\nhello_func()\nprint(hello_func())\n\n\ndef hello_func():\n hello_func()\n\n\nprint(hello_func)\n\n# Function allows to reuse ,without repeat\n\n\ndef hello_func():\n print('hello function!')\n hello_func()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# -*- coding: utf-8 -*- """ @Author: xiezizhe @Date: 5/7/2020 下午8:52 """ from typing import List class KMP: def partial(self, pattern): """ Calculate partial match table: String -> [Int]""" ret = [0] for i in range(1, len(pattern)): j = ret[i - 1] while j > 0 and pattern[j] != pattern[i]: j = ret[j - 1] ret.append(j + 1 if pattern[j] == pattern[i] else j) return ret def search(self, T, P): """ KMP search main algorithm: String -> String -> [Int] Return all the matching position of pattern string P in T """ partial, j = self.partial(P), 0 for i in range(len(T)): while j > 0 and T[i] != P[j]: j = partial[j - 1] if T[i] == P[j]: j += 1 if j == len(P): return i - (j - 1) return -1 class Trie: def __init__(self): self.dicts = dict() def add(self, word): node = self.dicts for w in word: if w not in node: node[w] = dict() node = node[w] def search(self, word): node = self.dicts for w in word: if w not in node: return False node = node[w] return True class Solution: # def minimumLengthEncoding(self, words: List[str]) -> int: # kmp = KMP() # ret = 0 # texts = '' # words.sort(key=lambda w: len(w), reverse=True) # for word in words: # idx = kmp.search(texts, word) # if idx == -1: # ret += len(word) # if len(texts) == 0: # texts = word + "#" # else: # texts = texts + word + '#' # ret += 1 # # # print(texts) # for word in words: # if word not in texts: # print(word) # return len(texts) def minimumLengthEncoding(self, words: List[str]) -> int: trie = Trie() ret = 0 words.sort(key=lambda w: len(w), reverse=True) for word in words: if trie.search(word[::-1]): continue trie.add(word[::-1]) ret += len(word) + 1 return ret if __name__ == "__main__": s = Solution() assert s.minimumLengthEncoding(["time", "me", "bell"]) == 10 assert s.minimumLengthEncoding( ["ojtnj", "uuydcho", "dgsyp", "dwxycpx", "dpmvc", "dvfhmb", "flrxjjx", "fwhdhvn", "rgsakp", "aiconf", "nzacpk", "sbxnaj", "shway", "rgrmz", "rysudo", "bzkioce", "mqxkzvu", "wyebk", "tymoaz", "mlmbg", "djbmek", "qfnme", "khkiyae", "tjdaxry", "sqtcwz", "ehnsai", "jhncvrm", "cxkzgrx", "pummt", "hzrpfcn", "lkyqit", "phpqdxw", "vangm", "wcjdgw", "pxesvtn", "mnqory", "bdrzvh", "brtzmo", "chqgf", "bipyxm", "meoikg", "ysyckk", "ojayeiq", "zrfbsb", "yhuotea", "crfbhq", "tllycn", "qxnzihf", "avyawpz", "bwsjym", "myjozc", "lbdksm", "mctlt", "dszowuw", "syshm", "xrvhhkn", "kgrcwfv", "dwlajlf", "yviuk", "xegjj", "spiczl", "vfvomi", "mgcujy", "dqmzb", "isrisgt", "vdrtuah", "vsyth", "eoclef", "poccek", "cgafrlu", "crbhpgk", "sromv", "xmvbca", "gobra", "ygvlq", "pjvhe", "tfweiso", "cskuohg", "eyalone", "pobkak", "nzpxn", "lbcrws", "uhtfe", "eorth", "showvu", "hxsmb", "jrggose", "izifkb", "oqwyf", "mozmzj", "ijwle", "ggtqqqv", "geevzj", "meota", "ifsse", "kdtofm", "swydhvf", "tzjhqap", "wqwwd", "jlinnov", "lmxkgeg", "stbot", "xrsfn", "etoyctk", "rygagm", "vcnrf", "zkdge", "emqtscp", "newqcyy", "nnuus", "exwsxbd", "zstvl", "lbkko", "kygkyqq", "oggji", "xytbjo", "mfbahk", "ggoks", "lmqewkl", "qexhyqe", "ogaogio", "nzvbav", "mdole", "qvyks", "gkupfu", "dgmpn", "ngrdrj", "iitqvk", "ipuiqb", "ugxfea", "ialkmv", "hmgnx", "aoyoj", "fvzhjil", "butrbp", "dwhxnes", "etkdwg", "cjkghz", "tovkq", "mmxhv", "jgcsn", "hmictal", "zxmnek", "pcoeg", "ntyqmlq", "hfubhtg", "ydjbv", "xnwlqto", "hatgi", "bsaczd", "pokwk", "arxlula", "zjtqlk", "ocfxup", "nsnqjc", "xdcsopi", "iqxyxp", "xfmtpvm", "bqtgcf", "wboycn", "aoeda", "uowqdgj", "rzzzx", "liucs", "ejzxz", "qmlehsh", "igrbmon", "dpmkbon", "pmayh", "nujdwdw", "awdgo", "ijgkzk", "inhee", "jzdtv", "adhauh", "grtmbp", "qndbvw", "zprrw", "mpqieq", 
"jzmzeuu", "fcvftqs", "qxzxqy", "lidguzz", "eazwd", "zjhfsz", "zsnzefh", "mnckfg", "zjgtq", "ckyxlif", "fznfo", "jegnof", "lzwyzb", "ozivfio", "igkclsa", "bebzn", "bitsggm", "lrnwin", "hjnnzr", "idvoirn", "dgile", "vfngh", "xbmur", "rqaftt", "wjwwwxs", "btreou", "gjsycg", "pvsiylz", "ccxzgdf", "excrrrr", "fiesr", "jdioj", "uzwsc", "odrlcoy", "hcsit", "ptwfprh", "sbqry", "kffvy", "ejeawbp", "omvcc", "iqgxqlt", "edsuu", "xnbue", "qfbcx", "fzlmbkl", "wrrcueb", "mmqispp", "nknilwd", "dewuhju", "hmdqlxy", "vjxgg", "lkuexo", "dzvfscm", "voulbs", "uevoqgq", "kmhwu", "oglzllg", "torhihn", "fhuqzc", "mmcfhb", "woyayma", "uznsvre", "mmxed", "aoskwg", "xrosbm", "hpyrgh", "tghwbwh", "hcwzn", "iepeftj", "judij", "kudbk", "jonpv", "lywck", "rxelz", "bgifz", "mehbxq", "fmqnz", "sqrmzj", "iqqjzex", "qioliz", "kjizbf", "lgdcffc", "pfgmcr", "trdabul", "vlqjdnc", "jjvbxe", "fqlayw", "ilbhtyq", "saawulw", "gxysrb", "kighql", "eceapr", "kztbcww", "jedkoy", "dxpcaga", "ndacphe", "rcoit", "ywgcnxg", "klipfup", "bddws", "jwyof", "lrfwgo", "bediwuf", "ujakh", "ppima", "xzhwvm", "guzmsqt", "ffbliq", "adjmynm", "akabzn", "inmykju", "vlcjyv", "orquepg", "tufrk", "vqpjymm", "lvuab", "qzxav", "ekcmu", "uqtuhie", "kfvtgf", "nklwjo", "ujxlfpl", "zobfpq", "eignijd", "ythctg", "artllm", "wodhh", "tzpwszq", "njdqegg", "hzrqib", "zvoxtfd", "htboem", "axjuix", "bvmvm", "jbnum", "bxdth", "atejt", "gqsqtnk", "fykrjbp", "ldyhonr", "wcuoj", "upphc", "agydg", "cjmwk", "rhxbqh", "tpgozdd", "qyqoy", "zjqutw", "qoohqny", "nsiacwz", "xupin", "criuvs", "eswjeft", "pdmevn", "zvogq", "lrrvo", "qhfqqpw", "ktudfg", "ijvmi", "neyjjdx", "rllpi", "vllvaa", "esebtu", "jyhcrh", "otgmr", "oudvyxj", "pmszy", "opeed", "gicni", "mnuzn", "mjbfpod", "sqwgxu", "dwniwz", "wmbmmv", "lyafuy", "zmvlz", "kopxzuh", "urcbbiy", "guhco", "nerjm", "lpdxc", "hxmjzz", "hynagc", "iyxeczi", "bdfxmoz", "yybnpqd", "jvgnb", "oquqem", "fmclmz", "dmkhf", "zxbjpp", "qpxgcir", "iecvjm", "gtkne", "lgtqrbc", "gilbn", "mcxsg", "ncwbhn", "wkriiq", "zhsir", "ptkkmw", "jcbpkrm", "vbefo", "vmbcd", "vqffj", "fhqzjt", "nryuh", "vmclav", "cjyggm", "sanev", "rrdocz", "zqdexbs", "jrxstt", "pyhcesj", "aagghyr", "cyemjrb", "aliohf", "qaslg", "pnyjzxz", "pehnvi", "suhuw", "twopabr", "sapqoc", "mckrh", "nzlgrxt", "aqpobnu", "pirbjgb", "plzlj", "raylxpu", "gyasfrh", "urjfxux", "xjbwau", "iupknn", "vhxnc", "dnbjop", "vrxhwmd", "vjsmkh", "rfmqids", "smaiwt", "vkyfo", "bjqyxc", "rbbbp", "dlkzg", "dwvdwu", "prulzh", "bavge", "ehhrz", "xxjqk", "pxopmp", "okmkmb", "slcznpp", "nvqlb", "jalrk", "parwlcd", "anbxo", "oqcxyzo", "fjhrdjh", "pgvnwfe", "yfjyvh", "quvszjm", "xyiig", "xtncqv", "svsix", "jvpdnh", "owuiv", "bsrugtt", "rmvggws", "lmdql", "kvmvd", "xrpmaw", "ssnxyb", "oworq", "rmmpuya", "rijpih", "aelazka", "kncksqx", "yvtdiy", "epato", "pbbamj", "fejsw", "zgsru", "ekwrre", "zqben", "vugxi", "fvcsdp", "rujcews", "asqxya", "worjlsd", "xggakg", "kzfpot", "haqon", "ypqxzz", "mmkzwt", "bdhif", "exzhv", "srnklzh", "hlrunb", "dwfyke", "fvgbtdm", "aeutp", "czhefx", "tegfw", "jkxpsb", "gxkfkw", "exvntd", "gvuti", "jdmly", "owaqhw", "fopuxzv", "edrvil", "biszwgv", "vgckzd", "fqdxn", "qktdf", "hpgwrk", "gpxiips", "vxnlab", "yylxz", "hsuscch", "bhivaf", "wzrwtc", "ebplv", "yzxykou", "mxlssom", "evghv", "hksleg", "shybau", "zeyqa", "tljqka", "axfkec", "fatdj", "janlkcc", "sjorbra", "jplge", "oazzot", "qbgtncn", "ozlil", "stohadq", "rvpuwn", "oqwpl", "byftgi", "ubuusl", "fkogr", "bybdyhj", "vinyuzs", "ivsqvz", "vmnae", "gckxw", "rozbe", "glvxwj", "rcgicu", "xmvbd", "itycsry", "llmwrs", "fuqth", 
"styrrwl", "wsseuln", "xwflcli", "muxgz", "ypmbboh", "rpmvnep", "wjvvnv", "arjnw", "toauwc", "ltjxqrl", "basffd", "clxozwd", "glmrv", "iejgfj", "cvkoj", "wotjf", "mqucec", "xalgemc", "hgimkh", "golvfq", "fuqpmak", "mhpcp", "pxoibt", "ledqa", "guzbyr", "ztvbeka", "racdp", "krsngra", "aaiknz", "bhoobyc", "xibbe", "yohepxk", "eclevs", "ldliwcm", "qatvlk", "eiypbw", "vxvtwa", "nkdwsej", "ftmyvp", "gpthye", "gazwoi", "zzgipon", "cithg", "wpabujl", "jhezlnb", "vqqaxfg", "kvpbk", "vggjemp", "owylv", "lgwtfpg", "jjqvfm", "xbhga", "tulvfv", "sefuo", "hbysv", "ozopepd", "awyrifd", "pnudwx", "vreje", "zhpgw", "qygbf", "tvbrvy", "zzmcw", "cznee", "deuzxt", "qfppjvi", "ilkps", "ydwhg", "krwkxzu", "mnsidg", "rkxyyr", "ajkqz", "xtmom", "vqocor", "fympcl", "yyleyzy", "jjvzhrn", "kpmxvuz", "txoeqlx", "lhhmn", "chzgpf", "ncnjxle", "ihxrg", "feqixq", "lkfhcar", "hfnsh", "bifczy", "umknat", "yrhgkh", "mgpcu", "qotukst", "yqlmfq", "ttcdp", "xnjjzm", "cukbr", "hjhjb", "iikfcsr", "nsqbnnz", "dauygf", "cmydq", "lfnhqnl", "ppqgs", "hscbfug", "ohzisud", "opspdkv", "aauxbop", "wpkhzo", "sxbsgu", "tajrv", "ololy", "mxmus", "vizvxv", "osaqz", "rxygkn", "mrzqlf", "zrriyxb", "ufroe", "bajozg", "atpsu", "uhgauzu", "tffdw", "mdjulde", "rbrmy", "jhkqvwl", "gzsultq", "nkbfi", "xtvwh", "dryzcv", "emaxuk", "zucvutb", "jdduyk", "bjdin", "loicuq", "qhjjb", "rgfjbq", "mphnk", "lxvceyx", "zeoxb", "fxhnxu", "qpbipe", "ophwp", "wiioer", "quchwj", "pouxunw", "bloxgg", "xbsma", "dtwew", "xstorn", "qfrfkz", "gxusbsn", "dhnxd", "mhstbs", "hekbtu", "wvrrjw", "yeiwd", "patplsx", "qmyiyi", "mowboj", "iskyd", "bqhjj", "povppk", "vthpwx", "uuydaw", "rduxvez", "vmcww", "ylruvph", "ymqosp", "wzcvohg", "lhepwta", "bckhc", "oiyyt", "wqzfv", "uduec", "lkkbtzl", "prvpbo", "jrwstii", "ijztoo", "qwwth", "vqzqiun", "krnjp", "zyanpiw", "ojhjhvg", "lohmb", "thqtf", "reptzv", "zgkyq", "lhkvy", "cmjwl", "fmilgpw", "jrfawz", "vrtzd", "ezgfl", "plzng", "zidzso", "civavlg", "vtwopu", "ljhckxo", "nuydt", "qembl", "fiwrre", "gfrgi", "gzegiq", "mltlqo", "pcett", "snbsc", "msibcqn", "beacrhz", "vsycjt", "gjqji", "smcegol", "zregkp", "smcazoj", "dziqad", "jpuwp", "hnlztac", "vduitco", "wyencad", "bkdnnqo", "cabzyg", "mgpcwr", "fxgvkxt", "wlkcrdd", "bhmhsy", "gqcctjc", "atafpt", "vdzhmcg", "ighxj", "gfqpale", "fohbrtj", "mfpsgt", "tarjocf", "gyycb", "qvqfryl", "jpwowwc", "jcgcg", "gmrjze", "nfptxq", "hmjhxge", "ieelj", "suvkgr", "nwjxe", "tkepqm", "extnpmq", "rxzdvf", "relzaa", "hfhgaq", "lmihlz", "pacocq", "dclxr", "oknoem", "pbpnnd", "nleerfl", "tvytymc", "aamfnl", "ufdnq", "bxyzvyh", "vksvout", "lohxhf", "sskgn", "aawbv", "hrvhx", "wvoqf", "vxkvh", "oqany", "bcmyd", "epdddqn", "zrlej", "bchaf", "hmftii", "mefcrz", "wbxvc", "ewwnldf", "cqecxgh", "cnwvdmk", "vetrw", "zmogwov", "lshlzpe", "lijay", "tcdqg", "xavqixd", "yjkhtsl", "myjvow", "cgthhd", "taaii", "iuuegk", "lcypmle", "wesrit", "tybco", "nhxysw", "awkrj", "jcmqa", "porvo", "nrypriu", "vznnevp", "hzklwi", "vapuxh", "wyfkn", "albemu", "ttfdbl", "dbqrjv", "cxals", "qzitwf", "ysunur", "llsefy", "cghfzji", "jboaa", "emhlkw", "khhmgha", "twlxgjz", "pyujor", "ozcax", "fetvovo", "mdhrrd", "qdhdne", "fiuvw", "ebyxh", "ldaothh", "vwyjf", "yjyljlu", "ivroqg", "qvpeyec", "eemsdra", "wavgeqk", "bjejrqg", "mdjimoz", "fgopy", "lgwodr", "cunvszh", "wiver", "ghmog", "jzgfyk", "vxlbx", "kvgbtn", "cunorte", "mtesdc", "zdzmqu", "pigik", "smruadg", "czjxlt", "kukgaok", "tsldpqq", "luomo", "ezbcvdc", "tfetwes", "uopzf", "wsvezkw", "wrnlvbx", "bpqungd", "jqnnof", "rqhiomi", "voulqb", "ouspxn", "chngpz", "fbogfcv", "nqhunxo", 
"rydbke", "ewduo", "suqqwup", "oxzfxj", "kuwfwm", "euiics", "mvftoau", "vstfbm", "vnmtoo", "muicf", "bjbskxb", "knbomlf", "enrbtfk", "hnaqe", "vxzsr", "gkqma", "qygmn", "ztkybmb", "injggpk", "enqrgdk", "rkgoct", "tgaiu", "dnknoxk", "iwuou", "oxanccl", "xestej", "ekrqq", "xbwhz", "jkdvxfh", "oybaay", "afyhci", "papffjq", "bdppssw", "qwyvjx", "xmnnosl", "kvqzjl", "wcwii", "ygfvt", "tpabbht", "kjmaq", "duschjz", "gguiof", "wgfhve", "joqmfjq", "smqfd", "ynlovlz", "sgrzum", "bobmux", "dcppi", "isdjrwl", "lbevb", "efqsirq", "hlgfql", "enmemlb", "dbmfk", "ibfpzm", "rtdnooq", "yicdq", "xadul", "dxibxzi", "yyxnj", "jhsdzxw", "thltbi", "kwhreyi", "hrocoa", "fnaalbd", "vnwona", "nnonm", "naqaf", "xgzzies", "uhruynk", "kgadfx", "hyohzbd", "hnajx", "yipzh", "ezdxaet", "xbzppoz", "rwnewxz", "hlcbkmb", "znyhu", "zsqtpkr", "gmyxr", "rphyvo", "bgjuz", "nulpv", "eejfoso", "xmwcnes", "xxxxnpe", "jezkk", "idfsxrw", "qgzjtf", "arpzpo", "hxsanlt", "emvotcb", "sknzhvg", "icitca", "ivhdln", "sqilerz", "ndigw", "bcsre", "mibbep", "zsczom", "cgghjbb", "fkylfgt", "bvzofs", "mefsng", "bispbza", "tsosgy", "xopalrw", "wserf", "jbmlz", "xidxny", "ffmpjos", "vddwxmd", "netnsg", "kgevsp", "pguuv", "cwisp", "slxiyb", "dmwaguc", "jobwusu", "uytcqrv", "hzhsy", "zrlsdd", "xhxah", "rxzij", "zwdgy", "ygmvkz", "drkzbo", "qpsal", "tpxvl", "lfmfl", "sayjvlh", "rdamym", "ycuzd", "zkycu", "hdesec", "unequk", "lpkdid", "vorxls", "admsdop", "rqnvkyg", "krnqqtb", "rxfms", "xfthd", "pxjbk", "gpslrg", "rwziwef", "usxgqvz", "baxxye", "ocrkkrw", "lrlgsp", "ceyctg", "rniml", "vavug", "jgircl", "jrpnmsa", "rywvlfg", "prxnys", "fkzmknn", "ooelc", "btvfs", "yqepuvw", "tmmmb", "qmpzexb", "zjckjvd", "aieytbb", "oafqq", "szrcyh", "czrxgae", "ifkte", "hfgajox", "pwpnkqq", "yqphogn", "xuwthrd", "mpcmy", "qitdoa", "avlzfrh", "ywpip", "dgeki", "fgbnx", "tyofu", "xziqzj", "qxzvqz", "vtsqk", "ipkld", "yfhim", "ebaegdc", "ubhrh", "ldejv", "mtflwy", "ocpyj", "yopgqs", "fkjxxd", "njnnwr", "nylkeb", "taymdqv", "ekpznq", "cbzobmg", "bucdds", "qjozu", "uvpghor", "obhnu", "ljkxbg", "uqrxjtf", "xwbxiw", "oxsmcg", "spchdd", "pcuitj", "faidq", "tybmy", "uygiyp", "qloizj", "cafgmy", "smetd", "kwcwb", "tdabxf", "fpmrc", "lfjujn", "vvmvex", "mnsgdc", "enjlgsw", "ohwcg", "kxjdaup", "rotjarp", "aovdoq", "oviwq", "qwaxs", "bmazco", "plcljsv", "yytjhl", "vgwjm", "drnue", "vqjgf", "uqlsfy", "bmqmfp", "lkauwna", "ozmqce", "heunaxr", "zaffbj", "arbek", "qjnllw", "fdkhlz", "wgmbwh", "yceqag", "ltjjq", "yurggfw", "puaafsl", "tjiqkyt", "yuzub", "ytmrfq", "ommmu", "ipknn", "iubnuab", "dzthvc", "zjbzpew", "dcooev", "pjydqcf", "zuojlzy", "zwjyfc", "spmac", "dfkbnz", "fzriie", "asusog", "hdodx", "drjpo", "ddyif", "chabv", "ebvkwrr", "burdjl", "jjddi", "dljzkye", "samyg", "zwgxcq", "xtratwo", "qfopz", "xvlaw", "laage", "btdium", "vzlnzt", "kmvbzkq", "kctobsx", "kazbelu", "yxdwrk", "eslvjc", "nhsdmvs", "zuxqcc", "hqtxovn", "zrbdai", "fgjxs", "txecvio", "kjxlq", "dkuxss", "mkbevn", "pzmdqc", "ihyia", "atsub", "twytus", "nzooxj", "qwuoly", "fdoigo", "zukhlh", "mugeaxt", "qqsfyls", "qqtql", "wrvphcx", "nzjfhx", "uequtk", "fxuto", "qnast", "nveys", "ltbrcth", "toctdib", "fbpnh", "umxfgn", "zvjuta", "yeron", "qzvswqk", "gbctr", "ryryz", "zieknd", "zcsna", "jrhak", "zfxqsj", "urlba", "lbozqf", "yfcjaa", "hazgy", "gmmfzyz", "zjvkyc", "rvfdcf", "daitab", "hcxqgum", "qwakp", "ltbsjwo", "pqqtygx", "upxcxao", "qylot", "lmxqc", "dwzcd", "tjccm", "mqcpap", "wgxqtr", "ivycvxy", "wdykg", "snvqka", "jxtvtsb", "jnyowsq", "iwfuoig", "cuoixhu", "fzwalg", "djhrar", "sjmahk", "dyusf", "wrxqvdi", 
"ftytlor", "jsjbv", "vjbebg", "agvsn", "vvmpgm", "gsgjopk", "vbqvhy", "afopf", "zybfuz", "aqsgc", "ytrjsvn", "wlhdfr", "vdhvl", "jrlvr", "cscxwf", "yhgbew", "wupbl", "ssuhyvv", "bhcirzk", "oykwk", "ijbto", "qsnpgw", "otwzage", "ytqzh", "rgwow", "bvhgkwh", "fvawxie", "fllxw", "gfcqf", "scoqb", "qubrq", "gdxjtp", "ahrpck", "awnlgi", "cmehsyp", "dwmytpy", "firyeq", "oohwhr", "caelk", "mqemvs", "qflkzi", "tfpibll", "ybhzd", "ctsxri", "yurocj", "dnlnl", "ydmdva", "xkaotl", "xovax", "ypynrqp", "kwfzw", "fbgsmrc", "tutime", "rcugul", "cvewno", "typhbpa", "wazew", "flzfs", "wxxbza", "ogjfkl", "vjlebet", "imbubm", "xinyncy", "dqmxfy", "buhagzh", "jjadpos", "gejyz", "gxshqk", "wkwrs", "dqeriqo", "dmixr", "bysjih", "aoloq", "ddwhsxs", "nteqv", "cqagf", "ditsrn", "wfxgl", "jwjqb", "rvkxj", "rxapr", "yrlkip", "npquasb", "nvezlr", "gmhchcx", "lodfihi", "dheypxa", "plzjykh", "qopsthg", "zsnes", "raongg", "zrpnac", "tzmtltj", "jsecdn", "rzudh", "hkcyic", "xsxmw", "reeuwpn", "grkwrag", "gvzzbsq", "lrfta", "aqyvbkj", "ytgfu", "wcmvd", "olnvfi", "hhgmhb", "kojmepr", "wpohl", "szhgg", "hymiblu", "lkwjr", "zulqpz", "sdcqjo", "olgsgez", "lxkpqci", "yxcgn", "gmvex", "fskpppe", "utzto", "axncvp", "lcyahba", "ydeae", "zvzar", "ghfkkqv", "ryrpg", "gucpbq", "reofjz", "cdnoo", "dchhh", "byiwd", "cqbhok", "ksfnoa", "xsmmlr", "qyvdfqh", "dzshj", "bpifnzh", "uxmoml", "jdxvojf", "ihfll", "vwesfof", "zynnpb", "fwzra", "rxlgww", "vkmjd", "hcjgzt", "mkapfl", "ffjqlf", "wulaebc", "gurramv", "tufkzai", "bxprqek", "nkohv", "abgfwyl", "slslg", "wirsnh", "pykvuh", "fdrwk", "gtmgsxe", "dxsaab", "lqiryty", "aoezg", "tzhugcg", "uoarf", "dwhsv", "rjiuoi", "ycgcdnf", "rtfmwz", "amkjc", "woogtdi", "deprx", "ucknu", "womfm", "xdeev", "qapxpuu", "ngulnk", "fgtxyf", "hnyabid", "cilmy", "wrsewtf", "luvtmo", "wftuh", "ifoeeqp", "dtfdhhl", "rwnburg", "fohkkul", "frqqi", "gsrcyc", "teuync", "dvpvak", "daqjki", "kksscp", "somsde", "tyfvck", "ftfekl", "ahncv", "yvosm", "qgllvg", "ylfwv", "jenqns", "lqovrnm", "iyger", "nfvtsv", "bknxmqj", "pfzybdr", "hqjol", "chlpk", "etgrtqa", "msuxdx", "vnoatf", "ypdzomn", "vsshmg", "rfkipq", "jvpbiz", "vbskd", "edsoixj", "uowim", "hqtsj", "inbsxal", "ookrv", "ipotdnk", "kmazqd", "jpfghb", "gvmnnpv", "juvwa", "xtkvzw", "ejqcl", "ebgcnt", "ztuyu", "dlzthw", "zzipe", "iaxwdxy", "htynwkc", "lefbq", "pizfr", "vttrsv", "oagak", "eqlrom", "vttefg", "dsrmk", "oekbe", "cvugzk", "diwvz", "gxmfob", "vjowzm", "mjpop", "uznhz", "kqvjwug", "wjqvxfg", "jbpwezu", "wsckdx", "slqfomn", "omuxk", "zlgblso", "kvitoq", "dmafq", "djxmzk", "pjqfegq", "yjrttas", "siakcx", "iutiqk", "nwfdj", "gbgtazk", "cpqtf", "panmlr", "aqubhsg", "iwdim", "nqetym", "mwazh", "thyhy", "ydtxan", "xfoin", "lsosc", "esznfa", "xgdisi", "flvbzh", "mpltx", "iwjpsqp", "udfycf", "rntmc", "ltflwu", "wkgbaw", "bcuzt", "hejxuhb", "lguohe", "klnhb", "mjump", "avcwrol", "yrcqlc", "ihxul", "avajh", "gtpauet", "iemzk", "rfdub", "gqnbk", "cfcmg", "iobyh", "iruuapf", "tyifwt", "sbdtp", "mngcpmb", "oaqpolm", "mmimmh", "gxknadi", "bmxhuu", "ulyoa", "keidy", "vsnfk", "cnnnfty", "pkajm", "ddgeecb", "prxidqd", "wmenvhd", "akjcqo", "tnekfef", "ipvsi", "pzjwq", "wmmct", "erdjnuf", "vgeaqs", "nlbdx", "dpvbe", "dgeqz", "aiguzh", "akawppx", "tykrjcs", "gvavo", "hkyle", "yhedx", "xzqcg", "gzdxt", "csssbk", "tmekrmv", "lfsgo", "iizahz", "aszfd", "aybqnsl", "vadwxsl", "ulmiii", "xaxdugp", "sfnnsbg", "dkyruh", "qhpqu", "amesjd", "evjuki", "vtqjw", "aoabp", "qnsuhe", "bplbx", "fdqok", "ozkhgib", "cggwzys", "nbknjay", "ooambw", "evmvegf", "htdlxik", "kahcume", "bojpn", "bhipie", 
"hdyjslw", "pbkkq", "qwszl", "fgkbzsd", "hejdx", "vmcfhgx", "puzlmmm", "meffil", "boakbiz", "eczot", "fvkkit", "jebfx", "umvkjg", "uikgs", "rycgpf", "rfmfgmy", "nveho", "bgywqen", "gepfma", "vquyq", "wcercbw", "wbpjkxc", "rqloeda", "omclokx", "hvotwp", "tvqfxxu", "qrtghk", "hggme", "arnmfnt", "cxprj", "rspdt", "hlgfq", "dmqel", "pcerxk", "ptqjc", "wzreko", "kahks", "xjnzo", "xzzye", "xbdeu", "koiwkv", "jlwkkjr", "xzdixoc", "xeedvrm", "mrtnhqi", "jaeann", "mvubp", "olklqf", "retbgcj", "qxxlhh", "cqyyoy", "ngwikg", "qijte", "sjzck", "zkmkx", "ongtzf", "tanow", "smgntvq", "urfgt", "xwcroa", "kadcpd", "cxhgo", "walku", "kvvcsyt", "elwmuxk", "bfphtm", "vzeumuq", "sknvev", "vbsnfd", "grmbg", "vjahwt", "dmcbmn", "smubz", "jobbfcv", "ujlkm", "lcthh", "bauuqdu", "kjgzgtq", "gicjz", "nugbax", "kbnjfiu", "sqfpein", "obbgfww", "ykggxjx", "irnmog", "xniuv", "rqiwycq", "hzlgyu", "yjtrttv", "satym", "dgqhlkk", "rghal", "tbekx", "kkwmo", "eahwhks", "bpvmbur", "sqtgkj", "khboz", "enefr", "vkzqvt", "wfruavu", "ninomu", "ypktaoa", "mlpmoit", "fxyhjfp", "fgnpp", "txieja", "dprnj", "bgyrp", "zsqwqrw", "stqzki", "kwiayb", "ulbsn", "aetje", "vwzbb", "tedwyqs", "cymiruy", "jigpoqx", "ypuqsc", "weletu", "gvibea", "chhuldm", "baylv", "wdhovo", "imfqu", "meodnsk", "jhlckqw", "jolyfh", "jsfkrhr", "tnbfzvs", "egcfht", "qnzmyr", "owtrqu", "oqaqu", "xftys", "goxfftm", "sgbnp", "bhfvaz", "gospa", "jwzlvwk", "lqncoqd", "xxizglc", "bwffm", "mhpggzr", "kdaoewx", "anviou", "mqiij", "wkskpn", "enougdh", "vldnn", "gbfgz", "ejmbh", "qsdrvsx", "mrvbz", "cqlufpf", "kbgjlu", "njgna", "admrmk", "pwwsc", "gxkot", "pdjwh", "ejwxt", "bpaxufv", "iwjzs", "xxfsg", "vuhgh", "srytgb", "yesvlux", "tggnch", "cgnbb", "fbzbx", "aomoqf", "zkrvrjg", "ueaoz", "dppacnl", "ewovhxz", "kbvee", "ixeeb", "gwgoqm", "hlwlxe", "fpmkrk", "wzjsr", "ispwe", "garofu", "jcmpec", "tggeo", "yzdeo", "axpmln", "zhnlhck", "duyqcn", "tpqwqi", "jvmaj", "bisgoy", "mpwmurb", "olqla", "ecapwan", "kcpxn", "xcapin", "ooctk", "sgqql", "vcyyjxf", "ejyom", "jsgtha", "logxnjg", "nypadhj", "dprmk", "cqkuzb", "gratv", "tgkjgu", "fttcafm", "tpryi", "ubbhw", "uwcuyn", "zkgohs", "snfesz", "ifrex", "tkbfz", "fvvkp", "otjiq", "lgomjjv", "ertracf", "bregu", "kkbizb", "hyhvn", "zjcnxfl", "mceskuj", "lmupdq", "zdzqzgo", "yorppew", "fpwtjd", "dxvyzt", "bbnnu", "pkycae", "ucvapn", "dijmkb", "nvwwpr", "bufkw", "zhono", "vayxf", "hlfwkev", "klkvkj", "yzgpwg", "lcbqr", "tkkfi", "pcgljx", "bhduxu", "rgfipts", "hkjbrr", "fobvy", "wqmqhxo", "yjgvypg", "ehgoizl", "ipiibzh", "aqxbxtx", "lrtin", "fyyuypr", "pyrocgm", "kwqbg", "ukccw", "wgsbpvx", "pcoivrv", "okhxaba", "bbuaibf", "ccvfm", "phpst", "yxtqiz", "cdfbo", "sijfljn", "gdlhn", "bqmbced", "tiejf", "aurqer", "olmyd", "prctay", "lwflhi", "bbehvta", "oxoda", "lklyc", "rzedhp", "kairil", "envan", "wdcwfk", "xoroddb", "womrlr", "ruxebe", "jnpywrd", "wrifvz", "zkewcd", "vllfrn", "uvdvjh", "bglpya", "vzokkbw", "apaoqt", "xpjizn", "xoajmd", "xapjwc", "jcknwg", "bjpreep", "ffkua", "ukcbah", "bugvkrf", "cbmmfs", "cwaczhl", "nsqaj", "sjeikg", "fayqif", "slowoh", "xjpvkpa", "ynunjle", "bqavt", "nkpqudr", "neikvd", "yuqlzg", "pdxbtrb", "cashlog", "iqiqy", "smjmxv", "zbtpbr", "zzamzcv", "jmakg", "txfswc", "pkaym", "swlde", "utann", "mqgpjne", "pslfvek", "nbiqhb", "bzsianu", "wnxgbi", "ahkeeiz", "dqdfjg", "bptdg", "pwita", "uqyflq", "txabjn", "yznjmve", "mukcqqf", "cxonbf", "ixuewjm", "pzlcat", "eikeeo", "scwsoa", "uaeyw", "oeorff", "gbqgd", "qboqiv", "hiulpb", "dbbdm", "qvdxx", "aypxbcn", "ykjwdbg", "pvfxn", "shrqyz", "zaxtu", "pfefgww", "jwifrw", 
"zxuud", "kpkwhlj", "lwptgd", "zpdmvsw", "takeb", "ynehl", "kixtod", "fyrgm", "qirzmr", "shyvec", "xjgzt", "bwfvht", "wyehh", "renzc", "nnibax", "slhfng", "yjtecc", "lghvbzf", "qroxvun", "mlsed", "rrudho", "cyffhh", "tjlxahp", "xmaepzk", "jvdzh", "bbvegrw", "cebcz", "odjpeam", "guerph", "tgmphgo", "ohtkqq", "jcxojz", "haeheae", "erydxni", "hatjxx", "kwmgkjw", "wmezvy", "hsuuvfi", "ineek", "grkxmhb", "alxkt", "rmspxdg"]) == 13956 assert s.minimumLengthEncoding(["me", "time"]) == 5 assert s.minimumLengthEncoding( ["yiyqbv", "njqvawn", "wnlovvp", "vogum", "jpolc", "zleec", "sxdrww", "rbowr", "xsjorra", "kwjsx", "vornum", "echku", "kuizegn", "rhuvv", "eemkh", "yshht", "pbixoa", "cmbxvtr", "iupia", "nmcbq", "mgrjsx", "ejvniwt", "svhsel", "kazenhf", "fevpm", "xcwqfgw", "ozikzc", "mywnmqt", "taorwjm", "gcshacq", "fgtasq", "qexygw", "ljmbari", "zfjudos", "rgxuzy", "kmzryaf", "exjfd", "mcqnebz", "ptoim", "zglfi", "fhneaz", "rexgc", "lhplwyr", "dthdp", "jizetec", "obyzg", "rqupa", "yphttge", "wdcdn", "wdomtr", "hchbd", "ytyra", "upytftl", "swbbi", "qpcybv", "dcoxspd", "dftkf", "nwjfmj", "ojbwy", "zofuy", "adqkt", "kpcply", "aeukw", "fqblb", "xurrbpo", "veioa", "puzvl", "bnzvlax", "tjzsdcw", "jarqr", "orxjbg", "ilrqdri", "syjuoyi", "htoqdco", "gwslw", "dpqyf", "jnkhv", "fpqhpr", "baewnvc", "caunsf", "qhbpe", "wlckl", "lmoroqe", "ddlak", "qipwbfp", "cefqs", "surczp", "jtmfuro", "ezhqau", "dlsco", "hywoqh", "lnifq", "hvfmu", "cqjdkok", "tggdact", "rwuowdk", "attnl", "lwhyq", "mqtsc", "bmwajiy", "nyohug", "vvfpt", "lbyazu", "sarwago", "iccztck", "ugsxcw", "rpwza", "yofmlll", "ulhdzhg", "lbaqk", "bwxxwc", "dmsbawg", "tjloy", "imbrkul", "xguke", "shlkuq", "lizjcdu", "kmvykl", "ilqxxjm", "rtbvvqt", "qisec", "zobzr", "thwntt", "afpifh", "uwiiovy", "hgsyecl", "pdgnm", "mqyesch", "suexztu", "msguuwu", "yrykkv", "xtoommc", "muteu", "bamml", "kkhlb", "jfrnx", "wpytor", "zzogpt", "yryxxt", "hzqofjd", "ehtildc", "ptclf", "nyltvd", "nrret", "qqqqt", "uuxunf", "jajxt", "lzdvlc", "gpdtjug", "hjsso", "jairua", "qarxuey", "rpwwjwv", "cjqypep", "tuzgcs", "oytqxb", "rgfmud", "stnwn", "tzzaop", "jpuopzg", "qeywd", "spnstrg", "dfwgntg", "yjyqk", "ioowc", "duqfg", "gmqxe", "xhlbby", "liurjk", "vdujfm", "xxyyn", "omapgc", "koemzbz", "ziiyako", "pjmhfrv", "bshtfgj", "ihjvt", "pnipuw", "fajiuj", "rdvcqzd", "mgknns", "ouwkm", "ejnklwc", "osepl", "gplpyvs", "paxrddg", "gsjlpd", "lgnmgl", "yifeeer", "hhnwlol", "fcmxs", "ilinwgm", "udhfdtq", "ceefc", "xweqx", "jfelwod", "rtywfjo", "kzwrgqx", "fcjriov", "fzytqv", "zcpcddo", "scpyzow", "kbzegu", "gclwr", "gmiwlp", "rtpka", "yiywuyy", "qceot", "dtrgn", "ntwbu", "fxobd", "zmxwza", "qcksyz", "wgbtmm", "pzorve", "hztydc", "jqlay", "ijdkbk", "uzjrps", "gfzibk", "gsxqj", "kgjrkdd", "smdeuk", "iwizewp", "owjie", "kcdccu", "ifltqr", "zrdfbm", "pznbcsk", "mtkpi", "cpasir", "flrxrm", "uxcxnv", "htlfcp", "ltukxfr", "ftbbha", "jhgjgyz", "qjreroc", "vcvtbid", "nrhlq", "gtkpot", "gyplqqg", "lnorig", "fixhufv", "ugcug", "ndfug", "wuorhe", "owocnkw", "rcnbf", "ioiiiui", "kakwtne", "svxtt", "wdrxogm", "ibrxs", "bddqi", "jeguac", "hlftdw", "nutgfjw", "krrzvf", "amxuloc", "deozdoe", "ovsvk", "sfqsl", "slgiw", "jbjujag", "mhiru", "uqksech", "davosw", "nlueljv", "rhtvdu", "ivdpdqa", "qnbenpq", "dtapqq", "hwwfpxl", "oyrfosn", "goxgmgo", "tbvutl", "cbbbcm", "iiugpk", "hinkem", "vvaitk", "pskyf", "hdnekg", "nqhfn", "dqbozx", "zcwpko", "kafyu", "jfegubk", "nofqzsk", "ujmxxg", "akwzemu", "yvhxb", "qqlwofi", "hmoecj", "qwgtlc", "jepvygq", "uzggm", "fztiews", "lvndvf", "vulax", "znqudh", "whgqi", "noguo", 
"vewkx", "uruvgf", "ubohmba", "aulzi", "flvfdlq", "yspfie", "wugif", "qndyiwa", "keihmct", "rggvn", "ojjmuoh", "sbbcl", "cdivmoz", "vkusmp", "mfddp", "kgohwvp", "rjbbxw", "vsgptj", "hbyjoz", "gufrv", "orxiv", "fxcqfw", "okppik", "qlouw", "lkryigo", "qccvc", "ixcnodg", "wlfilts", "ahqtevp", "kkbuha", "oehaez", "rzczib", "vxobk", "wmetvjs", "xfjgeq", "eadzl", "aeqdvch", "czojfq", "hxshidl", "ofswsj", "iwbqcmg", "schhwtt", "ltyth", "wiccu", "akill", "zaaji", "qepvfa", "mpvrkeu", "dcpenm", "wdhlk", "llqbby", "lronwkr", "rwtguo", "ofnvs", "lxdnwzf", "dctmilf", "zhckjd", "hajsuac", "wpylhy", "zhipvm", "ihikr", "zzwjgvr", "gdglrn", "skhow", "tlqtjl", "uypli", "evdva", "civide", "iroihm", "lvuzid", "vexat", "ngmvrz", "szdhbt", "ggrbz", "bsmovlt", "kguomvl", "onzvx", "nobgxw", "tqxemc", "vbiyx", "fpzpf", "ogtvf", "yuthri", "xszbn", "xcuhj", "nosnpbp", "mowsxg", "tfalyy", "kxombgm", "cukrz", "krmseq", "velzh", "kmufxj", "nvxlkq", "ualvras", "wytoucy", "qicqyym", "pbeujtv", "haojnbm", "xnfffpe", "wvoiald", "rlyvf", "sxamoxw", "ztqnmp", "biiavx", "lnjnzs", "arqdjdy", "pkrgokc", "qxswouj", "dgqah", "mnhzo", "ggilb", "qscrd", "ggvkimw", "qlxjys", "wximi", "aqlhio", "iavtvy", "grkqf", "dwrtut", "uozutfc", "fogxpdb", "ydtntlq", "vnmpmwp", "gtxhwq", "mlpihx", "yfpjlz", "hdvcquq", "nunny", "wklasgp", "wxduo", "topsqf", "tngcpzc", "mcrut", "pdnsmt", "kavaok", "seiqsqa", "bhgkiyt", "mawvhtp", "domcnrm", "fgusghc", "wdaufwz", "tzpuks", "kisndyz", "fwyieu", "wtdum", "ytxhl", "yhzkmuv", "nppnqe", "ccvhj", "dautnyq", "hkaliab", "kngan", "ebmhiop", "vsdkcef", "nmpcnd", "vxvnl", "cwcgu", "zsuneh", "qjgcmd", "awvba", "rzbisxo", "oilqrj", "neiazlm", "hlyrl", "tmiht", "lwqxxv", "gyblrw", "gnnjkb", "lrxiln", "xlwlseh", "npfwcvp", "yjcdhw", "rzndd", "orlhmip", "gatuojh", "osotgvv", "owksz", "kcocizf", "izlev", "smigns", "wtxfwo", "knwizte", "mqjojzp", "lkezye", "xqldbu", "cvbpyl", "aoipbz", "asrupt", "bdwkesh", "jpaykm", "pksbg", "gdbsibd", "lfxpwk", "rmnfph", "yzxwke", "xjwyusv", "yetar", "sytdz", "pnystzi", "yntcqo", "egoorl", "aydxu", "rfdrfhe", "flzkos", "mmjgev", "fbjwmvi", "jeouc", "lcmkri", "aggsb", "aaeazai", "amyxpey", "onxqpg", "qrjpxq", "zanea", "niwsgtv", "nsqja", "utgskd", "hlcum", "frygtl", "xjmqetz", "upqddd", "vxzdstm", "hcmtera", "ejstou", "xkcguf", "bokigdk", "vurnv", "zsgrje", "nbxlf", "tpilcx", "lvepux", "xacdtp", "amdgx", "ubbvnx", "xmvznh", "tlprri", "sthkn", "xhoad", "deotaxo", "pqzppmw", "xlcpx", "qwzrpyp", "lujabeb", "heskwyy", "mzzaaur", "vnestcs", "rryphdl", "ibdiabi", "eoiyt", "znflx", "clougix", "zzadxw", "lrrgtf", "lsdoakf", "yxfmqx", "qhnrry", "ktcdmv", "veygqu", "btjlo", "fcspsc", "gozoazm", "xcsqgz", "aazae", "nkuvask", "mzdgjq", "sihqdhy", "zadrwzw", "gzcyuea", "lpgccic", "fqtfuzw", "bjoqpkc", "oydpkxc", "sugnnu", "hyvygf", "axkxo", "rsmzb", "dlhqmac", "gbqby", "npqkj", "odbtb", "bdsib", "zyasxv", "ifxqcc", "lmnjwhr", "ibuyu", "uzhle", "ccpwhjr", "vhrojnz", "fkzfz", "fyesm", "dnvipvm", "jbbqn", "qdkgl", "xkvvgq", "dphugaf", "soxbfun", "rbgokx", "biveiz", "vbaqtn", "qapydgf", "llldu", "ottjpzu", "fwjuc", "cawio", "gbkwe", "rrnnxer", "luviy", "zsalse", "ckwdeox", "ozhqocm", "vtozfwz", "jztole", "ydqei", "bfugz", "psawjp", "dzlyrwp", "izuyrne", "rbwcfr", "vdvte", "usjbqs", "zzovkxr", "frfkwk", "mmtmdd", "sntka", "wachbzo", "rmzvj", "scbngo", "eqiuiwi", "qfakk", "cckcmt", "owhzow", "rejdlw", "iprsqdq", "twwaldw", "mfilzyk", "jygvx", "iewbo", "irhko", "zpazqhn", "ndqbg", "ayzxqdz", "zvpbh", "maapq", "pzitrfm", "qsgsurv", "viwcfff", "wpgenms", "tjmvu", "czuemc", "infxoo", "avhbw", "nugkqx", 
"xubakjp", "ndask", "utaqq", "njhuxq", "sdvuex", "tfmxqp", "bydovjo", "bizxjsp", "zoozxyv", "jegei", "gkpqobw", "psumbtg", "gkgoh", "sgcbpql", "xxkhy", "kdorkr", "hcomj", "ulrpyv", "rhplil", "tyyochd", "xhzul", "srdjmns", "kgukye", "yepvs", "xnobsjb", "umxmtub", "wvqasr", "igftpzw", "exhecn", "rreee", "jpxuvxh", "jriqf", "akexunb", "ekvdsoe", "ytzvj", "vfrlyae", "pmfai", "biouzle", "xkbce", "clzyi", "xhjoso", "wmxkxb", "dqzzig", "ydtby", "gskwj", "wlkwbz", "zepvllz", "zsgqp", "blntawk", "eynmil", "bdqyp", "wgtnqbc", "rrgaq", "gtafuzo", "qdiko", "kkcsdo", "zwqhs", "kugzbmf", "wtvvs", "kqsdx", "mxsuxiz", "pgbgjfe", "vodfr", "qbvwu", "vfwbhgw", "ayojye", "kolzfqg", "xnbecj", "akbcnf", "uutrn", "upmesa", "marqej", "bbucee", "bazqbau", "qikgsyf", "oeayzn", "uilxnzr", "vpnxknl", "btgtxgh", "vjaav", "zaxtzah", "msweps", "awduwld", "gzaep", "ngvgc", "qpoqdgn", "kimndg", "qilmmpw", "oafhlyp", "nyelgvw", "onymk", "feycbc", "dhcrx", "siqpfly", "tyvycmf", "huctqp", "uscjrp", "bbptd", "msdmu", "xlxhye", "xnyzcox", "kyskda", "injdkmp", "jiwus", "spjylwd", "eqcrnt", "snfiu", "jvwvge", "yfeaw", "mmdnsjj", "suzdw", "xiupf", "rjwjhng", "tqvasy", "rmibpa", "zuqax", "prpndnp", "efryqe", "pwuqfy", "wpqlfs", "aeswq", "cxkeiue", "jydxzfi", "tzfvwp", "zzgtw", "mupiusx", "sojavt", "dxmsgq", "migjiyj", "kixjk", "ywwvcpl", "khzcuo", "oykhx", "fochin", "foxbfkc", "sizjg", "wrjcvr", "ceadd", "tvfqgxq", "whzhche", "dcoeti", "mpilfib", "cphie", "ucpnjm", "ajltvx", "kpizym", "vevfsrs", "jznrri", "yvhxomr", "cbcnk", "yuwuhu", "jywuzed", "kqakusq", "jrnzgfo", "mjimzz", "mfjybnd", "ntqyq", "junxxck", "myvqajv", "kvuqs", "obfxw", "jwuba", "vnrvzvy", "aeric", "vtgda", "nkrocpt", "ahitg", "dzxtr", "zswwc", "yhxap", "fdhiwr", "cpxtqv", "izbmo", "zyioo", "vysnoe", "ouuyvj", "cumdhzn", "dbsmph", "cktjem", "vbmxy", "utgfyhc", "rqdeorp", "btnlmd", "chxwlt", "nsghoqi", "egycsm", "wkanat", "lzjyf", "donyx", "cchqsa", "xozzz", "yzmnf", "jfzuh", "dpcpg", "hlahz", "vobopk", "lssfeli", "ccttzi", "glzgqpv", "oyqzug", "qqhkrr", "euwotv", "hwbmtz", "hiylhly", "bppzne", "yetyyvs", "cnbwcby", "hzblk", "pfjmxt", "dsxvt", "vvkju", "zjrfr", "gdbhb", "udoad", "nbhpzfm", "iwetbym", "atmly", "tnxli", "myegb", "hiwqsk", "btrajk", "nhrmwn", "ftmbecv", "xopht", "eiikqy", "qizanwa", "cwxiatf", "jshjva", "llrtkn", "zhivu", "lmwiu", "oaeaqz", "oxotfub", "jnkafm", "juhrmq", "mqzbtw", "puiaxty", "dnahvoj", "gaxhz", "xfnay", "iqmlnlq", "xudhcg", "izpkz", "tqttmt", "bwnbs", "fdufd", "vhzyymh", "zhqtxr", "evbcrv", "xvnma", "dgcwy", "cwxzlbz", "oodiol", "teyim", "kqqfjub", "ftsqzi", "arfztkr", "oqlujx", "rpkkdov", "ptoff", "ivxaxr", "nxeept", "cacpl", "tehir", "spvggl", "qfzxkn", "bhwkukx", "fkdpuq", "xdrngre", "fnfplq", "dzbrl", "ufgxu", "sciec", "fgdydvw", "nmpaqxi", "ydsvfv", "natjz", "lruyvzf", "xznznxp", "mhfrh", "kddsk", "uwatn", "uklzs", "lnuta", "ryizc", "cvwko", "tnzpk", "ywpiv", "vbvcagq", "pzolw", "nmyfhg", "cshkofj", "ksptw", "kqejh", "zgzjqzo", "mxzrw", "enabosq", "vmubgc", "sfzcj", "hewvk", "ewhrq", "oifnsmi", "izdnvu", "cshgtk", "mqotuhd", "gnqgj", "rxailbm", "iyhxvtu", "ncjzklq", "zjmnoc", "awqwos", "ugujppc", "spbvfwl", "gntsvo", "euksu", "qnvneph", "crhmf", "brktmf", "mvgmr", "yzcskrp", "tihawec", "edqmxpn", "fxyymlr", "dzfkucm", "prldz", "gplrlhz", "bohwr", "bhebbk", "mmecj", "segydd", "ptslsb", "pyhgw", "cwmrq", "mjfhflh", "xhuid", "npxmb", "izilq", "dczhqh", "tgfnxtb", "zrylvo", "lctxrar", "ylhrbii", "rfxedv", "llvhzjq", "bjocv", "wbnex", "cnohnf", "xahrl", "rouvwyc", "hbhovgv", "dhucp", "ncmff", "ncsskg", "gsjbyin", "lroxscf", "whfaenl", 
"vsfultg", "floxkpy", "captoai", "qwolyex", "ggaypn", "wzunypd", "pjixeu", "gxnjkoc", "pqiqhn", "xakjmgz", "vqizkx", "gdzcxr", "kyxwdd", "pgxmazn", "qeuwf", "bduknm", "tcrcn", "nehgee", "wktbcgu", "jwqltdt", "wczkai", "drkqs", "qhdqnn", "oobxirc", "lbunv", "ifscr", "xnfpbrw", "yrrdbax", "fbocs", "tewne", "iobixe", "zgosas", "yhesn", "xlqwd", "pfcen", "slsjffx", "ilwatrc", "mhsmgp", "iteghl", "aqhufdl", "kxgpqcu", "ryrcgp", "azidf", "smlnl", "rocxvbt", "iutfc", "loapgbr", "musulp", "dqcnj", "tpgbkfh", "wvskii", "itkfopo", "kytyb", "rzahbu", "aewptd", "ohergbb", "cadxh", "aphwelj", "huooyzn", "gtttia", "izeyhcr", "cfvxz", "aitaxyp", "vypqost", "ebfnmif", "kgiucm", "zryyu", "oxgnbpt", "frpwo", "ouqvodl", "pdaazh", "gxwmf", "dozxsjm", "yndpsik", "zcwvu", "mihug", "jgodklw", "ysklw", "cfxqv", "yqvtz", "rctnp", "xjywa", "kpqyw", "hhtegzt", "rnwbeoi", "uyxqum", "jahcwbe", "jzjns", "ovwoaz", "oqmsrua", "natbejl", "deffv", "okgbr", "paqhy", "jkafhte", "lifsknp", "afmskh", "oemdro", "oxuwov", "qtyxa", "hkpfsm", "ulaubn", "tciurw", "myohwlo", "okuiejb", "ormoqsb", "gmipz", "hterzir", "ekxzre", "xkevge", "ihenf", "nnhzv", "eocjmx", "upzal", "oounfko", "myhbwub", "fwipva", "pkzzvpd", "nrupm", "vluzq", "fxkoyho", "atzktr", "aomrp", "qwpser", "ejagmb", "cfigelm", "bvanb", "cgcgabo", "hmjvlqt", "hxxocf", "ftqaud", "htuipy", "bhwmcn", "tgyvaqe", "lvuwh", "yiabzs", "rzzavu", "fiubm", "uuqsb", "riyakuf", "psscffd", "kvckzr", "fktmnf", "ivzqexi", "nhxzm", "kffjmb", "vdzxv", "esago", "bfikw", "gaiuxmz", "volokcm", "jypcs", "psibvs", "hxaxklf", "lmqwgy", "spnbimo", "mtihak", "xikoiy", "rmmtv", "phaqgxj", "zcuwkhk", "emodbyb", "ztahsya", "ieiqm", "lfoquh", "emznnq", "pnhlgut", "pgvads", "cqsjx", "lxnjei", "zpque", "rdjbiyb", "sxedpu", "potnqva", "iirkn", "rjmnrxd", "ksgcd", "waeymnh", "tizdz", "kproa", "wpttygd", "lvyze", "peewvgm", "fwtyzbw", "zitkk", "gfgqr", "udgvlz", "swqspo", "ohhvyq", "kgyuau", "hcerp", "pdomlm", "twabkk", "zfsea", "epiwp", "xgycjpt", "jtkdh", "mxmdm", "rtkzm", "qkacy", "nuvdiq", "agctak", "hypgyh", "ewtjp", "paysolw", "bcutebe", "xelxyb", "gzdvrth", "vpzfv", "cxrkt", "admiyzi", "lqlmn", "zbjpbg", "tlvdnli", "zetnox", "ylcsobo", "balajod", "igoume", "sxcgw", "sbkkafk", "fmndnnw", "incsa", "jyupkg", "uhvvc", "rswnbth", "nvprfj", "figqf", "znyidqi", "aijper", "euidr", "dftxkze", "vnppi", "splwifc", "fprgafl", "ixzaz", "mrhqtne", "dtkjsy", "dsmqrgy", "xfscz", "cymvmpu", "vptkfdx", "zrgrjq", "mqvwsur", "hdtlw", "ugdpwun", "cvxitc", "vytvqg", "pmtpfz", "nfdtdt", "umvwjuc", "jouxc", "qpypri", "pdhqp", "lmise", "wlsvcfg", "aqdkzcb", "qlrmrfz", "pbgoyi", "xmsskoh", "jjdye", "xvsdmq", "ymjeipy", "igjyv", "uiojvmc", "uckoww", "grlnyeg", "hpglp", "omnnyy", "iiliir", "cnucbcx", "pcxvs", "hipad", "xmiltkj", "oorwi", "qgoxjj", "jnmviqs", "wpleqn", "tudxw", "pcogem", "hgewaf", "niwfexy", "vcttgcb", "anjgovq", "epgmscd", "mdtru", "xvapv", "rydjik", "kopppcr", "mjbsmu", "unxoakz", "ldpsw", "frksjr", "vyxxg", "yyydri", "szidq", "qvbtd", "qratl", "xwfov", "bzhqyxl", "fskrtf", "pcpzmnv", "xuxwx", "vzbevnb", "ebaqz", "dbpuek", "ooqwj", "gaimp", "coelqh", "bwuceq", "oxpfjt", "zrqyc", "rwllk", "pqunv", "ufbnn", "tbnjoz", "kkqmrxu", "qyyrm", "hislf", "wyuck", "ubpre", "pdioi", "aryhv", "vdcxv", "rkgmaag", "czlzokw", "gtxuduz", "grpijx", "qzrar", "qhues", "rmznt", "sxxmved", "onjzuwl", "atbjhip", "nrardl", "alrocy", "cfkip", "ihtbf", "pqdgm", "hmokun", "dpghac", "otwml", "mnbzwa", "ehetlt", "rchvq", "lwjgywn", "lzdmjo", "nvhohdp", "tmshcpc", "gavjv", "ycnkv", "uynzh", "bvpnfjq", "lfbem", "qberui", "vrmmhx", 
"wpbqtfq", "jujpx", "dujgkof", "hrpbso", "zhcdt", "iybngyb", "rgeruza", "nesyxr", "cihgfe", "hjgskb", "zspxeqm", "inzrgyd", "crkjq", "iooshwp", "muvvj", "wakis", "rowibwa", "qikwypf", "aportho", "pubcgx", "vqoqpfi", "rnpbri", "ussjv", "looor", "xkzvdv", "tstegg", "zgiiokw", "rwvyaun", "mqqla", "asnqp", "nghuryl", "hlvhn", "ecuotnu", "judvbu", "xgvuw", "oeckn", "hdhttsg", "hcyhu", "klbyjc", "tnrmqnc", "mjojxhi", "kvdet", "vbmevim", "oglrzs", "afbscdi", "zxrffti", "firzgmz", "oenim", "wgpua", "asiep", "kyteq", "wpeneca", "qixmeoq", "zaofon", "csxxtr", "cpwmnl", "feylas", "idjuo", "mrtpvta", "jjvmjy", "mnljocc", "lnvjleq", "oognud", "rbyneq", "rhvomm", "fldrkpk", "znvrp", "myswmz", "jiloe", "juivjmo", "ylhbyzl", "ndmabkt", "sgdvlq", "pmnddmi", "utpuj", "kfisv", "nxfeell", "mxhgqd", "ccvdsdg", "emtybo", "zmkylbt", "mmrpi", "dkwlgq", "iwlappb", "uimsrnu", "mkxaxmi", "tcvll", "njggal", "kmqud", "evgzlh", "oaxizbp", "jiuej", "xknlp", "cyksydh", "gbixmz", "vtouyk", "sxjpkio", "qhubt", "kflvnb", "sjdfggl", "bxozyj", "xekbh", "wtmcb", "xtapfco", "rnornl", "ursdpki", "waonim", "eibfyed", "zniinaz", "uyfohq", "qcaxlt", "koyaapa", "pjuvbsi", "ecpdl", "ifaqwm", "yyumzc", "gvfngfp", "lttul", "flyza", "uasdlme", "oklhb", "wulkzzv", "ziwsxo", "jqcxiu", "qdzrwgm", "zjdwy", "uumns", "emlnp", "irnrqp", "gqkza", "oynpcz", "yxyea", "zpamf", "gyehxbv", "nplkhcc", "rxeekyo", "kecgp", "gseju", "nkisxqf", "vlyud", "fxxihhm", "yjgtml", "fehwpdi", "wclnvyy", "lriwrc", "ikparv", "volfh", "ysphh", "szrvrv", "rqlmz", "jyqut", "fyftsj", "uvwfip", "rngwgm", "mjwaz", "roehjki", "ploxokr", "yjbalp", "fspkq", "yfxrb", "kzulvk", "ordxp", "vdrrt", "wdiojwd", "ridzl", "niykdvu", "whyycmn", "riwcma", "bkhgkrb", "nsine", "emgtgf", "zoymw", "ljtvhzb", "kfyfdma", "piygxdl", "onfwgdf", "fwmkm", "vqbljay", "icife", "bxfli", "yeygr", "qenhgm", "mtxuckj", "kdcyx", "kwqhfcn", "ywkfy", "prbpw", "pheyc", "kmnds", "cacqs", "kvekiqy", "bfvfhdy", "gxulp", "skmcra", "exomt", "lcxue", "mnvvday", "rsddl", "gooegc", "udght", "doymnin", "ccdap", "wuive", "dyyln", "rynust", "luxabyg", "kdkkyyw", "vawqfsy", "rmeswm", "rcxzyv", "clpowz", "pdntqm", "tvjkkmz", "iiclw", "nhudzen", "cybhu", "crwtw", "enypnh", "ygekg", "hrjwqt", "peissge", "wangcy", "rbpoik", "raqulbf", "gyisnsj", "rgbqn", "lgvuzb", "djicf", "epnuu", "nsapc", "voatgh", "yorfehc", "jxfttat", "wyuivb", "bwopl", "odwdsh", "anchkv", "sepvew", "qoxxmae", "bpvqnj", "sngfo", "buoazou", "zhijssa", "janng", "uvdbd", "yfvkqo", "lcjii", "mvacvrz", "xztiar", "lpbtrqa", "ukbpdx", "okaqpgr", "idgqlj", "ewglgo", "ruymhi", "pcidw", "bvuqj", "npzch", "yppyan", "oiguirj", "iijvwqj", "jvbwjys", "yjtunfc", "iaikra", "oduhdgk", "ivixur", "ibcgai", "djzvcbx", "lmtsul", "lgnwzol", "wursq", "xsxbqwq", "jqvwnc", "dcwwvtb", "vwybnr", "bughwjl", "rnelxb", "hmacv", "ufgdygl", "aabuat", "oynwask", "gnfjjf", "zipbq", "zxstn", "jdrbprf", "jmkvny", "rblpql", "vykdj", "qaakyqw", "osbhddb", "avgldyy", "kvpoa", "fnqcliu", "zzlninw", "drsal", "omswys", "hwqcpct", "ecraq", "fvhsbjq", "raauy", "pfmoz", "vvqvcm", "tbjqjun", "jcfbegq", "otiwup", "axvvce", "dhpdnx", "pennr", "hvvmvzv", "binezl", "ygdmcuo", "ypwnqn", "aloxdv", "ucieh", "kovbtag", "rgfpaww", "fpbftg", "spjowfr", "zridoy", "blwbbf", "evwlxi", "itbcz", "hgixuo", "qmoqmjb", "tkeeis", "pjiaq", "rbpje", "ledoui", "ubecht", "mphdd", "uzswsbb", "ntsybr", "qmnijyp", "pqwawe", "ltytill", "dpnxy", "pkxqcol", "ayrdi", "mycnd", "knotsn", "zvcrjl", "qwroblg", "vtrktey", "dzilezi", "wzkxg", "varqc", "xlpttyc", "xxqhnl", "jpxywa", "kjdsh", "hdseebw", "bxqbp", "flazqce", 
"xrtab", "rupsfq", "asswer", "rhqof", "hjzdv", "addsgax", "cuahzjj", "xwdilr", "osqgg", "pfhwv", "rqorah", "ggdlnv", "truvaoj", "jzuldwf", "mjddj", "vixtn", "eslxoaj", "cmoypm", "jvvzs", "oqgxcc", "tptls", "wwgwbj", "tysuhg", "xbnqb", "iogjvg", "fbxdmr", "zdvsmx", "hiuja", "watrt", "kjawab", "entxk", "jmnkaox", "zznsox", "asmzc", "soblvp", "quyxjw", "udrdc", "hyylvvw", "gzfwxuv", "jjqmjw", "faegxbl", "lqjcg", "bzmruq", "bykuh", "miwhd", "ykgtwhk", "oyobzwi", "oltwpua", "ctulabr", "dwandd", "vhuhox", "vtlknw", "ywvln", "qemqdeg", "akezvx", "kjmjpv", "vwuftx", "kreaxnj", "fvfop", "cxabs", "jfacbje", "eecnz", "cmblit", "gfvpoq", "whywnh", "pghvx", "ohgkmf", "xxtiwd", "nkojni", "dlcicnp", "bwyvyyd", "gifup", "vgjfr", "hhteifi", "kjhffq", "pawqaxl", "yozro", "slxluvd", "amqcquy", "vnnxkr", "wgdur", "rvawiu", "thcwnc", "cddut", "vnrtrv", "fnfio", "nhvxe", "rfdqmj", "ucblh", "ccbnt", "lxckaoy", "fnwcbx", "gmdbiwt", "ypvwjy", "cbjazk", "qmujnm", "nsqot", "lhcqt", "ijxcts", "nujrms", "itxel", "ghukr", "qpwitlr", "gcafqrn", "lcoho", "lfzab", "vwhgceb", "vgsgy", "jrtgo", "ryxlz", "deoyq", "ybenly", "lyysca", "sodvazo", "hbnnoz", "ovgvda", "elwtjx", "soydmn", "trdsi", "mwwjwo", "vupwj", "dszpcv", "kkhjdj", "ewmyo", "nmpeq", "oepldcq", "xttrgu", "wbcbxi", "jakzk", "peukyw", "fvcqv", "xklwuu", "hsmva", "kslmkq", "azllbig", "stnzih", "wfyud", "ihauy", "cfxmj", "pdyogwv", "dcqdpa", "xhusy", "jfpmpmm", "odeiiw", "ozyaer", "uykzvma", "tuaznxj", "kdnbdki", "syrnsem", "fdysz", "hhrpo", "fglzfi", "vgcqzqm", "qhsjr", "bvboe", "dpfwpvg", "mvvry", "itnnr", "lgykbe", "pscow", "mkrgeqv", "czffv", "apteht", "jeqixsx", "ksmbe", "zamivv", "vvmyo", "cwwoce", "sppubxc", "qaich", "nmbxr", "tfkwfxi", "iakhezl", "fxujis", "fkwffe", "antaylq", "mmfgstq", "zxaacy", "zlswx", "pbqxil", "eupck", "qzcxpbe", "rjalbzr", "wioagbq", "kreec", "zsdcuft", "rrdzb", "ocdlvq", "oxiroo", "zcxsqh", "wbrsi", "fqike", "oskzupi", "thvof", "dicbyst", "iojwe", "hyfizq", "yoknhww", "nupiyyn", "ievah", "slcgmxg", "cnecpa", "lcwsoj", "hnqsc", "ghipbi", "exobr", "nwpnq", "dmhbj", "amdbmwl", "xfbzovs", "puizvu", "yvsus", "ykysqg", "bgqdv", "zgqbr", "zkjpkej", "crkot", "zciymk", "tleogn", "sayrmz", "elwma", "zugjva", "uifwsmw", "wstrg", "xbotd", "hinsg", "qpgyoyp", "xzfocdy", "mbvuepb", "dtphufk", "cyapnt", "yyehhad", "ohdrd", "mlibm", "qzdfil", "rdwszqx", "bzcbmyn", "uarjlg", "mtwpqmx", "nmagl", "cepniel", "tylvaa", "melhd", "jygeneg", "fdglfy", "xcpciu", "ayrel", "bxceshv", "kspyg", "iclkaz", "ykbzt", "nrnkzo", "kxkto", "fabzszn", "edalls", "nilmh", "wwawgnn", "gymbtx", "mzipa", "ajevx", "qppisv", "otqhsf", "ippxak", "bixnqd", "uqitwo", "soxcug", "loiscd", "wqrjk", "rqntoa", "fzpxlp", "tuaob", "pyqqms", "krbzmmj", "aijqpfg", "nstqrbu", "wmtiahz", "joplby", "jyszxq", "jnxtyhe", "lbvfv"]) == 14011
normal
{ "blob_id": "57de9a46dfbf33b117c2dfbb534a5020e019d520", "index": 8513, "step-1": "<mask token>\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n <mask token>\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass KMP:\n <mask token>\n <mask token>\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass KMP:\n\n def partial(self, pattern):\n \"\"\" Calculate partial match table: String -> [Int]\"\"\"\n ret = [0]\n for i in range(1, len(pattern)):\n j = ret[i - 1]\n while j > 0 and pattern[j] != pattern[i]:\n j = ret[j - 1]\n ret.append(j + 1 if pattern[j] == pattern[i] else j)\n return ret\n <mask token>\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass KMP:\n\n def partial(self, pattern):\n \"\"\" Calculate partial match table: String -> [Int]\"\"\"\n ret = [0]\n for i in range(1, len(pattern)):\n j = ret[i - 1]\n while j > 0 and pattern[j] != pattern[i]:\n j = ret[j - 1]\n ret.append(j + 1 if pattern[j] == pattern[i] else j)\n return ret\n\n def search(self, T, P):\n \"\"\"\n KMP search main algorithm: String -> String -> [Int]\n Return all the matching position of pattern string P in T\n \"\"\"\n partial, j = self.partial(P), 0\n for i in range(len(T)):\n while j > 0 and T[i] != P[j]:\n j = partial[j - 1]\n if T[i] == P[j]:\n j += 1\n if j == len(P):\n return i - (j - 1)\n return -1\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n\n def minimumLengthEncoding(self, words: List[str]) ->int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n return ret\n\n\nif __name__ == '__main__':\n s = Solution()\n 
assert s.minimumLengthEncoding(['time', 'me', 'bell']) == 10\n assert s.minimumLengthEncoding(['ojtnj', 'uuydcho', 'dgsyp', 'dwxycpx',\n 'dpmvc', 'dvfhmb', 'flrxjjx', 'fwhdhvn', 'rgsakp', 'aiconf',\n 'nzacpk', 'sbxnaj', 'shway', 'rgrmz', 'rysudo', 'bzkioce',\n 'mqxkzvu', 'wyebk', 'tymoaz', 'mlmbg', 'djbmek', 'qfnme', 'khkiyae',\n 'tjdaxry', 'sqtcwz', 'ehnsai', 'jhncvrm', 'cxkzgrx', 'pummt',\n 'hzrpfcn', 'lkyqit', 'phpqdxw', 'vangm', 'wcjdgw', 'pxesvtn',\n 'mnqory', 'bdrzvh', 'brtzmo', 'chqgf', 'bipyxm', 'meoikg', 'ysyckk',\n 'ojayeiq', 'zrfbsb', 'yhuotea', 'crfbhq', 'tllycn', 'qxnzihf',\n 'avyawpz', 'bwsjym', 'myjozc', 'lbdksm', 'mctlt', 'dszowuw',\n 'syshm', 'xrvhhkn', 'kgrcwfv', 'dwlajlf', 'yviuk', 'xegjj',\n 'spiczl', 'vfvomi', 'mgcujy', 'dqmzb', 'isrisgt', 'vdrtuah',\n 'vsyth', 'eoclef', 'poccek', 'cgafrlu', 'crbhpgk', 'sromv',\n 'xmvbca', 'gobra', 'ygvlq', 'pjvhe', 'tfweiso', 'cskuohg',\n 'eyalone', 'pobkak', 'nzpxn', 'lbcrws', 'uhtfe', 'eorth', 'showvu',\n 'hxsmb', 'jrggose', 'izifkb', 'oqwyf', 'mozmzj', 'ijwle', 'ggtqqqv',\n 'geevzj', 'meota', 'ifsse', 'kdtofm', 'swydhvf', 'tzjhqap', 'wqwwd',\n 'jlinnov', 'lmxkgeg', 'stbot', 'xrsfn', 'etoyctk', 'rygagm',\n 'vcnrf', 'zkdge', 'emqtscp', 'newqcyy', 'nnuus', 'exwsxbd', 'zstvl',\n 'lbkko', 'kygkyqq', 'oggji', 'xytbjo', 'mfbahk', 'ggoks', 'lmqewkl',\n 'qexhyqe', 'ogaogio', 'nzvbav', 'mdole', 'qvyks', 'gkupfu', 'dgmpn',\n 'ngrdrj', 'iitqvk', 'ipuiqb', 'ugxfea', 'ialkmv', 'hmgnx', 'aoyoj',\n 'fvzhjil', 'butrbp', 'dwhxnes', 'etkdwg', 'cjkghz', 'tovkq',\n 'mmxhv', 'jgcsn', 'hmictal', 'zxmnek', 'pcoeg', 'ntyqmlq',\n 'hfubhtg', 'ydjbv', 'xnwlqto', 'hatgi', 'bsaczd', 'pokwk',\n 'arxlula', 'zjtqlk', 'ocfxup', 'nsnqjc', 'xdcsopi', 'iqxyxp',\n 'xfmtpvm', 'bqtgcf', 'wboycn', 'aoeda', 'uowqdgj', 'rzzzx', 'liucs',\n 'ejzxz', 'qmlehsh', 'igrbmon', 'dpmkbon', 'pmayh', 'nujdwdw',\n 'awdgo', 'ijgkzk', 'inhee', 'jzdtv', 'adhauh', 'grtmbp', 'qndbvw',\n 'zprrw', 'mpqieq', 'jzmzeuu', 'fcvftqs', 'qxzxqy', 'lidguzz',\n 'eazwd', 'zjhfsz', 'zsnzefh', 'mnckfg', 'zjgtq', 'ckyxlif', 'fznfo',\n 'jegnof', 'lzwyzb', 'ozivfio', 'igkclsa', 'bebzn', 'bitsggm',\n 'lrnwin', 'hjnnzr', 'idvoirn', 'dgile', 'vfngh', 'xbmur', 'rqaftt',\n 'wjwwwxs', 'btreou', 'gjsycg', 'pvsiylz', 'ccxzgdf', 'excrrrr',\n 'fiesr', 'jdioj', 'uzwsc', 'odrlcoy', 'hcsit', 'ptwfprh', 'sbqry',\n 'kffvy', 'ejeawbp', 'omvcc', 'iqgxqlt', 'edsuu', 'xnbue', 'qfbcx',\n 'fzlmbkl', 'wrrcueb', 'mmqispp', 'nknilwd', 'dewuhju', 'hmdqlxy',\n 'vjxgg', 'lkuexo', 'dzvfscm', 'voulbs', 'uevoqgq', 'kmhwu',\n 'oglzllg', 'torhihn', 'fhuqzc', 'mmcfhb', 'woyayma', 'uznsvre',\n 'mmxed', 'aoskwg', 'xrosbm', 'hpyrgh', 'tghwbwh', 'hcwzn',\n 'iepeftj', 'judij', 'kudbk', 'jonpv', 'lywck', 'rxelz', 'bgifz',\n 'mehbxq', 'fmqnz', 'sqrmzj', 'iqqjzex', 'qioliz', 'kjizbf',\n 'lgdcffc', 'pfgmcr', 'trdabul', 'vlqjdnc', 'jjvbxe', 'fqlayw',\n 'ilbhtyq', 'saawulw', 'gxysrb', 'kighql', 'eceapr', 'kztbcww',\n 'jedkoy', 'dxpcaga', 'ndacphe', 'rcoit', 'ywgcnxg', 'klipfup',\n 'bddws', 'jwyof', 'lrfwgo', 'bediwuf', 'ujakh', 'ppima', 'xzhwvm',\n 'guzmsqt', 'ffbliq', 'adjmynm', 'akabzn', 'inmykju', 'vlcjyv',\n 'orquepg', 'tufrk', 'vqpjymm', 'lvuab', 'qzxav', 'ekcmu', 'uqtuhie',\n 'kfvtgf', 'nklwjo', 'ujxlfpl', 'zobfpq', 'eignijd', 'ythctg',\n 'artllm', 'wodhh', 'tzpwszq', 'njdqegg', 'hzrqib', 'zvoxtfd',\n 'htboem', 'axjuix', 'bvmvm', 'jbnum', 'bxdth', 'atejt', 'gqsqtnk',\n 'fykrjbp', 'ldyhonr', 'wcuoj', 'upphc', 'agydg', 'cjmwk', 'rhxbqh',\n 'tpgozdd', 'qyqoy', 'zjqutw', 'qoohqny', 'nsiacwz', 'xupin',\n 'criuvs', 'eswjeft', 'pdmevn', 'zvogq', 
'lrrvo', 'qhfqqpw',\n 'ktudfg', 'ijvmi', 'neyjjdx', 'rllpi', 'vllvaa', 'esebtu', 'jyhcrh',\n 'otgmr', 'oudvyxj', 'pmszy', 'opeed', 'gicni', 'mnuzn', 'mjbfpod',\n 'sqwgxu', 'dwniwz', 'wmbmmv', 'lyafuy', 'zmvlz', 'kopxzuh',\n 'urcbbiy', 'guhco', 'nerjm', 'lpdxc', 'hxmjzz', 'hynagc', 'iyxeczi',\n 'bdfxmoz', 'yybnpqd', 'jvgnb', 'oquqem', 'fmclmz', 'dmkhf',\n 'zxbjpp', 'qpxgcir', 'iecvjm', 'gtkne', 'lgtqrbc', 'gilbn', 'mcxsg',\n 'ncwbhn', 'wkriiq', 'zhsir', 'ptkkmw', 'jcbpkrm', 'vbefo', 'vmbcd',\n 'vqffj', 'fhqzjt', 'nryuh', 'vmclav', 'cjyggm', 'sanev', 'rrdocz',\n 'zqdexbs', 'jrxstt', 'pyhcesj', 'aagghyr', 'cyemjrb', 'aliohf',\n 'qaslg', 'pnyjzxz', 'pehnvi', 'suhuw', 'twopabr', 'sapqoc', 'mckrh',\n 'nzlgrxt', 'aqpobnu', 'pirbjgb', 'plzlj', 'raylxpu', 'gyasfrh',\n 'urjfxux', 'xjbwau', 'iupknn', 'vhxnc', 'dnbjop', 'vrxhwmd',\n 'vjsmkh', 'rfmqids', 'smaiwt', 'vkyfo', 'bjqyxc', 'rbbbp', 'dlkzg',\n 'dwvdwu', 'prulzh', 'bavge', 'ehhrz', 'xxjqk', 'pxopmp', 'okmkmb',\n 'slcznpp', 'nvqlb', 'jalrk', 'parwlcd', 'anbxo', 'oqcxyzo',\n 'fjhrdjh', 'pgvnwfe', 'yfjyvh', 'quvszjm', 'xyiig', 'xtncqv',\n 'svsix', 'jvpdnh', 'owuiv', 'bsrugtt', 'rmvggws', 'lmdql', 'kvmvd',\n 'xrpmaw', 'ssnxyb', 'oworq', 'rmmpuya', 'rijpih', 'aelazka',\n 'kncksqx', 'yvtdiy', 'epato', 'pbbamj', 'fejsw', 'zgsru', 'ekwrre',\n 'zqben', 'vugxi', 'fvcsdp', 'rujcews', 'asqxya', 'worjlsd',\n 'xggakg', 'kzfpot', 'haqon', 'ypqxzz', 'mmkzwt', 'bdhif', 'exzhv',\n 'srnklzh', 'hlrunb', 'dwfyke', 'fvgbtdm', 'aeutp', 'czhefx',\n 'tegfw', 'jkxpsb', 'gxkfkw', 'exvntd', 'gvuti', 'jdmly', 'owaqhw',\n 'fopuxzv', 'edrvil', 'biszwgv', 'vgckzd', 'fqdxn', 'qktdf',\n 'hpgwrk', 'gpxiips', 'vxnlab', 'yylxz', 'hsuscch', 'bhivaf',\n 'wzrwtc', 'ebplv', 'yzxykou', 'mxlssom', 'evghv', 'hksleg',\n 'shybau', 'zeyqa', 'tljqka', 'axfkec', 'fatdj', 'janlkcc',\n 'sjorbra', 'jplge', 'oazzot', 'qbgtncn', 'ozlil', 'stohadq',\n 'rvpuwn', 'oqwpl', 'byftgi', 'ubuusl', 'fkogr', 'bybdyhj',\n 'vinyuzs', 'ivsqvz', 'vmnae', 'gckxw', 'rozbe', 'glvxwj', 'rcgicu',\n 'xmvbd', 'itycsry', 'llmwrs', 'fuqth', 'styrrwl', 'wsseuln',\n 'xwflcli', 'muxgz', 'ypmbboh', 'rpmvnep', 'wjvvnv', 'arjnw',\n 'toauwc', 'ltjxqrl', 'basffd', 'clxozwd', 'glmrv', 'iejgfj',\n 'cvkoj', 'wotjf', 'mqucec', 'xalgemc', 'hgimkh', 'golvfq',\n 'fuqpmak', 'mhpcp', 'pxoibt', 'ledqa', 'guzbyr', 'ztvbeka', 'racdp',\n 'krsngra', 'aaiknz', 'bhoobyc', 'xibbe', 'yohepxk', 'eclevs',\n 'ldliwcm', 'qatvlk', 'eiypbw', 'vxvtwa', 'nkdwsej', 'ftmyvp',\n 'gpthye', 'gazwoi', 'zzgipon', 'cithg', 'wpabujl', 'jhezlnb',\n 'vqqaxfg', 'kvpbk', 'vggjemp', 'owylv', 'lgwtfpg', 'jjqvfm',\n 'xbhga', 'tulvfv', 'sefuo', 'hbysv', 'ozopepd', 'awyrifd', 'pnudwx',\n 'vreje', 'zhpgw', 'qygbf', 'tvbrvy', 'zzmcw', 'cznee', 'deuzxt',\n 'qfppjvi', 'ilkps', 'ydwhg', 'krwkxzu', 'mnsidg', 'rkxyyr', 'ajkqz',\n 'xtmom', 'vqocor', 'fympcl', 'yyleyzy', 'jjvzhrn', 'kpmxvuz',\n 'txoeqlx', 'lhhmn', 'chzgpf', 'ncnjxle', 'ihxrg', 'feqixq',\n 'lkfhcar', 'hfnsh', 'bifczy', 'umknat', 'yrhgkh', 'mgpcu',\n 'qotukst', 'yqlmfq', 'ttcdp', 'xnjjzm', 'cukbr', 'hjhjb', 'iikfcsr',\n 'nsqbnnz', 'dauygf', 'cmydq', 'lfnhqnl', 'ppqgs', 'hscbfug',\n 'ohzisud', 'opspdkv', 'aauxbop', 'wpkhzo', 'sxbsgu', 'tajrv',\n 'ololy', 'mxmus', 'vizvxv', 'osaqz', 'rxygkn', 'mrzqlf', 'zrriyxb',\n 'ufroe', 'bajozg', 'atpsu', 'uhgauzu', 'tffdw', 'mdjulde', 'rbrmy',\n 'jhkqvwl', 'gzsultq', 'nkbfi', 'xtvwh', 'dryzcv', 'emaxuk',\n 'zucvutb', 'jdduyk', 'bjdin', 'loicuq', 'qhjjb', 'rgfjbq', 'mphnk',\n 'lxvceyx', 'zeoxb', 'fxhnxu', 'qpbipe', 'ophwp', 'wiioer', 'quchwj',\n 'pouxunw', 'bloxgg', 'xbsma', 
'dtwew', 'xstorn', 'qfrfkz',\n 'gxusbsn', 'dhnxd', 'mhstbs', 'hekbtu', 'wvrrjw', 'yeiwd',\n 'patplsx', 'qmyiyi', 'mowboj', 'iskyd', 'bqhjj', 'povppk', 'vthpwx',\n 'uuydaw', 'rduxvez', 'vmcww', 'ylruvph', 'ymqosp', 'wzcvohg',\n 'lhepwta', 'bckhc', 'oiyyt', 'wqzfv', 'uduec', 'lkkbtzl', 'prvpbo',\n 'jrwstii', 'ijztoo', 'qwwth', 'vqzqiun', 'krnjp', 'zyanpiw',\n 'ojhjhvg', 'lohmb', 'thqtf', 'reptzv', 'zgkyq', 'lhkvy', 'cmjwl',\n 'fmilgpw', 'jrfawz', 'vrtzd', 'ezgfl', 'plzng', 'zidzso', 'civavlg',\n 'vtwopu', 'ljhckxo', 'nuydt', 'qembl', 'fiwrre', 'gfrgi', 'gzegiq',\n 'mltlqo', 'pcett', 'snbsc', 'msibcqn', 'beacrhz', 'vsycjt', 'gjqji',\n 'smcegol', 'zregkp', 'smcazoj', 'dziqad', 'jpuwp', 'hnlztac',\n 'vduitco', 'wyencad', 'bkdnnqo', 'cabzyg', 'mgpcwr', 'fxgvkxt',\n 'wlkcrdd', 'bhmhsy', 'gqcctjc', 'atafpt', 'vdzhmcg', 'ighxj',\n 'gfqpale', 'fohbrtj', 'mfpsgt', 'tarjocf', 'gyycb', 'qvqfryl',\n 'jpwowwc', 'jcgcg', 'gmrjze', 'nfptxq', 'hmjhxge', 'ieelj',\n 'suvkgr', 'nwjxe', 'tkepqm', 'extnpmq', 'rxzdvf', 'relzaa',\n 'hfhgaq', 'lmihlz', 'pacocq', 'dclxr', 'oknoem', 'pbpnnd',\n 'nleerfl', 'tvytymc', 'aamfnl', 'ufdnq', 'bxyzvyh', 'vksvout',\n 'lohxhf', 'sskgn', 'aawbv', 'hrvhx', 'wvoqf', 'vxkvh', 'oqany',\n 'bcmyd', 'epdddqn', 'zrlej', 'bchaf', 'hmftii', 'mefcrz', 'wbxvc',\n 'ewwnldf', 'cqecxgh', 'cnwvdmk', 'vetrw', 'zmogwov', 'lshlzpe',\n 'lijay', 'tcdqg', 'xavqixd', 'yjkhtsl', 'myjvow', 'cgthhd', 'taaii',\n 'iuuegk', 'lcypmle', 'wesrit', 'tybco', 'nhxysw', 'awkrj', 'jcmqa',\n 'porvo', 'nrypriu', 'vznnevp', 'hzklwi', 'vapuxh', 'wyfkn',\n 'albemu', 'ttfdbl', 'dbqrjv', 'cxals', 'qzitwf', 'ysunur', 'llsefy',\n 'cghfzji', 'jboaa', 'emhlkw', 'khhmgha', 'twlxgjz', 'pyujor',\n 'ozcax', 'fetvovo', 'mdhrrd', 'qdhdne', 'fiuvw', 'ebyxh', 'ldaothh',\n 'vwyjf', 'yjyljlu', 'ivroqg', 'qvpeyec', 'eemsdra', 'wavgeqk',\n 'bjejrqg', 'mdjimoz', 'fgopy', 'lgwodr', 'cunvszh', 'wiver',\n 'ghmog', 'jzgfyk', 'vxlbx', 'kvgbtn', 'cunorte', 'mtesdc', 'zdzmqu',\n 'pigik', 'smruadg', 'czjxlt', 'kukgaok', 'tsldpqq', 'luomo',\n 'ezbcvdc', 'tfetwes', 'uopzf', 'wsvezkw', 'wrnlvbx', 'bpqungd',\n 'jqnnof', 'rqhiomi', 'voulqb', 'ouspxn', 'chngpz', 'fbogfcv',\n 'nqhunxo', 'rydbke', 'ewduo', 'suqqwup', 'oxzfxj', 'kuwfwm',\n 'euiics', 'mvftoau', 'vstfbm', 'vnmtoo', 'muicf', 'bjbskxb',\n 'knbomlf', 'enrbtfk', 'hnaqe', 'vxzsr', 'gkqma', 'qygmn', 'ztkybmb',\n 'injggpk', 'enqrgdk', 'rkgoct', 'tgaiu', 'dnknoxk', 'iwuou',\n 'oxanccl', 'xestej', 'ekrqq', 'xbwhz', 'jkdvxfh', 'oybaay',\n 'afyhci', 'papffjq', 'bdppssw', 'qwyvjx', 'xmnnosl', 'kvqzjl',\n 'wcwii', 'ygfvt', 'tpabbht', 'kjmaq', 'duschjz', 'gguiof', 'wgfhve',\n 'joqmfjq', 'smqfd', 'ynlovlz', 'sgrzum', 'bobmux', 'dcppi',\n 'isdjrwl', 'lbevb', 'efqsirq', 'hlgfql', 'enmemlb', 'dbmfk',\n 'ibfpzm', 'rtdnooq', 'yicdq', 'xadul', 'dxibxzi', 'yyxnj',\n 'jhsdzxw', 'thltbi', 'kwhreyi', 'hrocoa', 'fnaalbd', 'vnwona',\n 'nnonm', 'naqaf', 'xgzzies', 'uhruynk', 'kgadfx', 'hyohzbd',\n 'hnajx', 'yipzh', 'ezdxaet', 'xbzppoz', 'rwnewxz', 'hlcbkmb',\n 'znyhu', 'zsqtpkr', 'gmyxr', 'rphyvo', 'bgjuz', 'nulpv', 'eejfoso',\n 'xmwcnes', 'xxxxnpe', 'jezkk', 'idfsxrw', 'qgzjtf', 'arpzpo',\n 'hxsanlt', 'emvotcb', 'sknzhvg', 'icitca', 'ivhdln', 'sqilerz',\n 'ndigw', 'bcsre', 'mibbep', 'zsczom', 'cgghjbb', 'fkylfgt',\n 'bvzofs', 'mefsng', 'bispbza', 'tsosgy', 'xopalrw', 'wserf',\n 'jbmlz', 'xidxny', 'ffmpjos', 'vddwxmd', 'netnsg', 'kgevsp',\n 'pguuv', 'cwisp', 'slxiyb', 'dmwaguc', 'jobwusu', 'uytcqrv',\n 'hzhsy', 'zrlsdd', 'xhxah', 'rxzij', 'zwdgy', 'ygmvkz', 'drkzbo',\n 'qpsal', 'tpxvl', 'lfmfl', 'sayjvlh', 'rdamym', 
'ycuzd', 'zkycu',\n 'hdesec', 'unequk', 'lpkdid', 'vorxls', 'admsdop', 'rqnvkyg',\n 'krnqqtb', 'rxfms', 'xfthd', 'pxjbk', 'gpslrg', 'rwziwef',\n 'usxgqvz', 'baxxye', 'ocrkkrw', 'lrlgsp', 'ceyctg', 'rniml',\n 'vavug', 'jgircl', 'jrpnmsa', 'rywvlfg', 'prxnys', 'fkzmknn',\n 'ooelc', 'btvfs', 'yqepuvw', 'tmmmb', 'qmpzexb', 'zjckjvd',\n 'aieytbb', 'oafqq', 'szrcyh', 'czrxgae', 'ifkte', 'hfgajox',\n 'pwpnkqq', 'yqphogn', 'xuwthrd', 'mpcmy', 'qitdoa', 'avlzfrh',\n 'ywpip', 'dgeki', 'fgbnx', 'tyofu', 'xziqzj', 'qxzvqz', 'vtsqk',\n 'ipkld', 'yfhim', 'ebaegdc', 'ubhrh', 'ldejv', 'mtflwy', 'ocpyj',\n 'yopgqs', 'fkjxxd', 'njnnwr', 'nylkeb', 'taymdqv', 'ekpznq',\n 'cbzobmg', 'bucdds', 'qjozu', 'uvpghor', 'obhnu', 'ljkxbg',\n 'uqrxjtf', 'xwbxiw', 'oxsmcg', 'spchdd', 'pcuitj', 'faidq', 'tybmy',\n 'uygiyp', 'qloizj', 'cafgmy', 'smetd', 'kwcwb', 'tdabxf', 'fpmrc',\n 'lfjujn', 'vvmvex', 'mnsgdc', 'enjlgsw', 'ohwcg', 'kxjdaup',\n 'rotjarp', 'aovdoq', 'oviwq', 'qwaxs', 'bmazco', 'plcljsv',\n 'yytjhl', 'vgwjm', 'drnue', 'vqjgf', 'uqlsfy', 'bmqmfp', 'lkauwna',\n 'ozmqce', 'heunaxr', 'zaffbj', 'arbek', 'qjnllw', 'fdkhlz',\n 'wgmbwh', 'yceqag', 'ltjjq', 'yurggfw', 'puaafsl', 'tjiqkyt',\n 'yuzub', 'ytmrfq', 'ommmu', 'ipknn', 'iubnuab', 'dzthvc', 'zjbzpew',\n 'dcooev', 'pjydqcf', 'zuojlzy', 'zwjyfc', 'spmac', 'dfkbnz',\n 'fzriie', 'asusog', 'hdodx', 'drjpo', 'ddyif', 'chabv', 'ebvkwrr',\n 'burdjl', 'jjddi', 'dljzkye', 'samyg', 'zwgxcq', 'xtratwo', 'qfopz',\n 'xvlaw', 'laage', 'btdium', 'vzlnzt', 'kmvbzkq', 'kctobsx',\n 'kazbelu', 'yxdwrk', 'eslvjc', 'nhsdmvs', 'zuxqcc', 'hqtxovn',\n 'zrbdai', 'fgjxs', 'txecvio', 'kjxlq', 'dkuxss', 'mkbevn', 'pzmdqc',\n 'ihyia', 'atsub', 'twytus', 'nzooxj', 'qwuoly', 'fdoigo', 'zukhlh',\n 'mugeaxt', 'qqsfyls', 'qqtql', 'wrvphcx', 'nzjfhx', 'uequtk',\n 'fxuto', 'qnast', 'nveys', 'ltbrcth', 'toctdib', 'fbpnh', 'umxfgn',\n 'zvjuta', 'yeron', 'qzvswqk', 'gbctr', 'ryryz', 'zieknd', 'zcsna',\n 'jrhak', 'zfxqsj', 'urlba', 'lbozqf', 'yfcjaa', 'hazgy', 'gmmfzyz',\n 'zjvkyc', 'rvfdcf', 'daitab', 'hcxqgum', 'qwakp', 'ltbsjwo',\n 'pqqtygx', 'upxcxao', 'qylot', 'lmxqc', 'dwzcd', 'tjccm', 'mqcpap',\n 'wgxqtr', 'ivycvxy', 'wdykg', 'snvqka', 'jxtvtsb', 'jnyowsq',\n 'iwfuoig', 'cuoixhu', 'fzwalg', 'djhrar', 'sjmahk', 'dyusf',\n 'wrxqvdi', 'ftytlor', 'jsjbv', 'vjbebg', 'agvsn', 'vvmpgm',\n 'gsgjopk', 'vbqvhy', 'afopf', 'zybfuz', 'aqsgc', 'ytrjsvn',\n 'wlhdfr', 'vdhvl', 'jrlvr', 'cscxwf', 'yhgbew', 'wupbl', 'ssuhyvv',\n 'bhcirzk', 'oykwk', 'ijbto', 'qsnpgw', 'otwzage', 'ytqzh', 'rgwow',\n 'bvhgkwh', 'fvawxie', 'fllxw', 'gfcqf', 'scoqb', 'qubrq', 'gdxjtp',\n 'ahrpck', 'awnlgi', 'cmehsyp', 'dwmytpy', 'firyeq', 'oohwhr',\n 'caelk', 'mqemvs', 'qflkzi', 'tfpibll', 'ybhzd', 'ctsxri', 'yurocj',\n 'dnlnl', 'ydmdva', 'xkaotl', 'xovax', 'ypynrqp', 'kwfzw', 'fbgsmrc',\n 'tutime', 'rcugul', 'cvewno', 'typhbpa', 'wazew', 'flzfs', 'wxxbza',\n 'ogjfkl', 'vjlebet', 'imbubm', 'xinyncy', 'dqmxfy', 'buhagzh',\n 'jjadpos', 'gejyz', 'gxshqk', 'wkwrs', 'dqeriqo', 'dmixr', 'bysjih',\n 'aoloq', 'ddwhsxs', 'nteqv', 'cqagf', 'ditsrn', 'wfxgl', 'jwjqb',\n 'rvkxj', 'rxapr', 'yrlkip', 'npquasb', 'nvezlr', 'gmhchcx',\n 'lodfihi', 'dheypxa', 'plzjykh', 'qopsthg', 'zsnes', 'raongg',\n 'zrpnac', 'tzmtltj', 'jsecdn', 'rzudh', 'hkcyic', 'xsxmw',\n 'reeuwpn', 'grkwrag', 'gvzzbsq', 'lrfta', 'aqyvbkj', 'ytgfu',\n 'wcmvd', 'olnvfi', 'hhgmhb', 'kojmepr', 'wpohl', 'szhgg', 'hymiblu',\n 'lkwjr', 'zulqpz', 'sdcqjo', 'olgsgez', 'lxkpqci', 'yxcgn', 'gmvex',\n 'fskpppe', 'utzto', 'axncvp', 'lcyahba', 'ydeae', 'zvzar',\n 'ghfkkqv', 'ryrpg', 
'gucpbq', 'reofjz', 'cdnoo', 'dchhh', 'byiwd',\n 'cqbhok', 'ksfnoa', 'xsmmlr', 'qyvdfqh', 'dzshj', 'bpifnzh',\n 'uxmoml', 'jdxvojf', 'ihfll', 'vwesfof', 'zynnpb', 'fwzra',\n 'rxlgww', 'vkmjd', 'hcjgzt', 'mkapfl', 'ffjqlf', 'wulaebc',\n 'gurramv', 'tufkzai', 'bxprqek', 'nkohv', 'abgfwyl', 'slslg',\n 'wirsnh', 'pykvuh', 'fdrwk', 'gtmgsxe', 'dxsaab', 'lqiryty',\n 'aoezg', 'tzhugcg', 'uoarf', 'dwhsv', 'rjiuoi', 'ycgcdnf', 'rtfmwz',\n 'amkjc', 'woogtdi', 'deprx', 'ucknu', 'womfm', 'xdeev', 'qapxpuu',\n 'ngulnk', 'fgtxyf', 'hnyabid', 'cilmy', 'wrsewtf', 'luvtmo',\n 'wftuh', 'ifoeeqp', 'dtfdhhl', 'rwnburg', 'fohkkul', 'frqqi',\n 'gsrcyc', 'teuync', 'dvpvak', 'daqjki', 'kksscp', 'somsde',\n 'tyfvck', 'ftfekl', 'ahncv', 'yvosm', 'qgllvg', 'ylfwv', 'jenqns',\n 'lqovrnm', 'iyger', 'nfvtsv', 'bknxmqj', 'pfzybdr', 'hqjol',\n 'chlpk', 'etgrtqa', 'msuxdx', 'vnoatf', 'ypdzomn', 'vsshmg',\n 'rfkipq', 'jvpbiz', 'vbskd', 'edsoixj', 'uowim', 'hqtsj', 'inbsxal',\n 'ookrv', 'ipotdnk', 'kmazqd', 'jpfghb', 'gvmnnpv', 'juvwa',\n 'xtkvzw', 'ejqcl', 'ebgcnt', 'ztuyu', 'dlzthw', 'zzipe', 'iaxwdxy',\n 'htynwkc', 'lefbq', 'pizfr', 'vttrsv', 'oagak', 'eqlrom', 'vttefg',\n 'dsrmk', 'oekbe', 'cvugzk', 'diwvz', 'gxmfob', 'vjowzm', 'mjpop',\n 'uznhz', 'kqvjwug', 'wjqvxfg', 'jbpwezu', 'wsckdx', 'slqfomn',\n 'omuxk', 'zlgblso', 'kvitoq', 'dmafq', 'djxmzk', 'pjqfegq',\n 'yjrttas', 'siakcx', 'iutiqk', 'nwfdj', 'gbgtazk', 'cpqtf',\n 'panmlr', 'aqubhsg', 'iwdim', 'nqetym', 'mwazh', 'thyhy', 'ydtxan',\n 'xfoin', 'lsosc', 'esznfa', 'xgdisi', 'flvbzh', 'mpltx', 'iwjpsqp',\n 'udfycf', 'rntmc', 'ltflwu', 'wkgbaw', 'bcuzt', 'hejxuhb', 'lguohe',\n 'klnhb', 'mjump', 'avcwrol', 'yrcqlc', 'ihxul', 'avajh', 'gtpauet',\n 'iemzk', 'rfdub', 'gqnbk', 'cfcmg', 'iobyh', 'iruuapf', 'tyifwt',\n 'sbdtp', 'mngcpmb', 'oaqpolm', 'mmimmh', 'gxknadi', 'bmxhuu',\n 'ulyoa', 'keidy', 'vsnfk', 'cnnnfty', 'pkajm', 'ddgeecb', 'prxidqd',\n 'wmenvhd', 'akjcqo', 'tnekfef', 'ipvsi', 'pzjwq', 'wmmct',\n 'erdjnuf', 'vgeaqs', 'nlbdx', 'dpvbe', 'dgeqz', 'aiguzh', 'akawppx',\n 'tykrjcs', 'gvavo', 'hkyle', 'yhedx', 'xzqcg', 'gzdxt', 'csssbk',\n 'tmekrmv', 'lfsgo', 'iizahz', 'aszfd', 'aybqnsl', 'vadwxsl',\n 'ulmiii', 'xaxdugp', 'sfnnsbg', 'dkyruh', 'qhpqu', 'amesjd',\n 'evjuki', 'vtqjw', 'aoabp', 'qnsuhe', 'bplbx', 'fdqok', 'ozkhgib',\n 'cggwzys', 'nbknjay', 'ooambw', 'evmvegf', 'htdlxik', 'kahcume',\n 'bojpn', 'bhipie', 'hdyjslw', 'pbkkq', 'qwszl', 'fgkbzsd', 'hejdx',\n 'vmcfhgx', 'puzlmmm', 'meffil', 'boakbiz', 'eczot', 'fvkkit',\n 'jebfx', 'umvkjg', 'uikgs', 'rycgpf', 'rfmfgmy', 'nveho', 'bgywqen',\n 'gepfma', 'vquyq', 'wcercbw', 'wbpjkxc', 'rqloeda', 'omclokx',\n 'hvotwp', 'tvqfxxu', 'qrtghk', 'hggme', 'arnmfnt', 'cxprj', 'rspdt',\n 'hlgfq', 'dmqel', 'pcerxk', 'ptqjc', 'wzreko', 'kahks', 'xjnzo',\n 'xzzye', 'xbdeu', 'koiwkv', 'jlwkkjr', 'xzdixoc', 'xeedvrm',\n 'mrtnhqi', 'jaeann', 'mvubp', 'olklqf', 'retbgcj', 'qxxlhh',\n 'cqyyoy', 'ngwikg', 'qijte', 'sjzck', 'zkmkx', 'ongtzf', 'tanow',\n 'smgntvq', 'urfgt', 'xwcroa', 'kadcpd', 'cxhgo', 'walku', 'kvvcsyt',\n 'elwmuxk', 'bfphtm', 'vzeumuq', 'sknvev', 'vbsnfd', 'grmbg',\n 'vjahwt', 'dmcbmn', 'smubz', 'jobbfcv', 'ujlkm', 'lcthh', 'bauuqdu',\n 'kjgzgtq', 'gicjz', 'nugbax', 'kbnjfiu', 'sqfpein', 'obbgfww',\n 'ykggxjx', 'irnmog', 'xniuv', 'rqiwycq', 'hzlgyu', 'yjtrttv',\n 'satym', 'dgqhlkk', 'rghal', 'tbekx', 'kkwmo', 'eahwhks', 'bpvmbur',\n 'sqtgkj', 'khboz', 'enefr', 'vkzqvt', 'wfruavu', 'ninomu',\n 'ypktaoa', 'mlpmoit', 'fxyhjfp', 'fgnpp', 'txieja', 'dprnj',\n 'bgyrp', 'zsqwqrw', 'stqzki', 'kwiayb', 'ulbsn', 'aetje', 
'vwzbb',\n 'tedwyqs', 'cymiruy', 'jigpoqx', 'ypuqsc', 'weletu', 'gvibea',\n 'chhuldm', 'baylv', 'wdhovo', 'imfqu', 'meodnsk', 'jhlckqw',\n 'jolyfh', 'jsfkrhr', 'tnbfzvs', 'egcfht', 'qnzmyr', 'owtrqu',\n 'oqaqu', 'xftys', 'goxfftm', 'sgbnp', 'bhfvaz', 'gospa', 'jwzlvwk',\n 'lqncoqd', 'xxizglc', 'bwffm', 'mhpggzr', 'kdaoewx', 'anviou',\n 'mqiij', 'wkskpn', 'enougdh', 'vldnn', 'gbfgz', 'ejmbh', 'qsdrvsx',\n 'mrvbz', 'cqlufpf', 'kbgjlu', 'njgna', 'admrmk', 'pwwsc', 'gxkot',\n 'pdjwh', 'ejwxt', 'bpaxufv', 'iwjzs', 'xxfsg', 'vuhgh', 'srytgb',\n 'yesvlux', 'tggnch', 'cgnbb', 'fbzbx', 'aomoqf', 'zkrvrjg', 'ueaoz',\n 'dppacnl', 'ewovhxz', 'kbvee', 'ixeeb', 'gwgoqm', 'hlwlxe',\n 'fpmkrk', 'wzjsr', 'ispwe', 'garofu', 'jcmpec', 'tggeo', 'yzdeo',\n 'axpmln', 'zhnlhck', 'duyqcn', 'tpqwqi', 'jvmaj', 'bisgoy',\n 'mpwmurb', 'olqla', 'ecapwan', 'kcpxn', 'xcapin', 'ooctk', 'sgqql',\n 'vcyyjxf', 'ejyom', 'jsgtha', 'logxnjg', 'nypadhj', 'dprmk',\n 'cqkuzb', 'gratv', 'tgkjgu', 'fttcafm', 'tpryi', 'ubbhw', 'uwcuyn',\n 'zkgohs', 'snfesz', 'ifrex', 'tkbfz', 'fvvkp', 'otjiq', 'lgomjjv',\n 'ertracf', 'bregu', 'kkbizb', 'hyhvn', 'zjcnxfl', 'mceskuj',\n 'lmupdq', 'zdzqzgo', 'yorppew', 'fpwtjd', 'dxvyzt', 'bbnnu',\n 'pkycae', 'ucvapn', 'dijmkb', 'nvwwpr', 'bufkw', 'zhono', 'vayxf',\n 'hlfwkev', 'klkvkj', 'yzgpwg', 'lcbqr', 'tkkfi', 'pcgljx', 'bhduxu',\n 'rgfipts', 'hkjbrr', 'fobvy', 'wqmqhxo', 'yjgvypg', 'ehgoizl',\n 'ipiibzh', 'aqxbxtx', 'lrtin', 'fyyuypr', 'pyrocgm', 'kwqbg',\n 'ukccw', 'wgsbpvx', 'pcoivrv', 'okhxaba', 'bbuaibf', 'ccvfm',\n 'phpst', 'yxtqiz', 'cdfbo', 'sijfljn', 'gdlhn', 'bqmbced', 'tiejf',\n 'aurqer', 'olmyd', 'prctay', 'lwflhi', 'bbehvta', 'oxoda', 'lklyc',\n 'rzedhp', 'kairil', 'envan', 'wdcwfk', 'xoroddb', 'womrlr',\n 'ruxebe', 'jnpywrd', 'wrifvz', 'zkewcd', 'vllfrn', 'uvdvjh',\n 'bglpya', 'vzokkbw', 'apaoqt', 'xpjizn', 'xoajmd', 'xapjwc',\n 'jcknwg', 'bjpreep', 'ffkua', 'ukcbah', 'bugvkrf', 'cbmmfs',\n 'cwaczhl', 'nsqaj', 'sjeikg', 'fayqif', 'slowoh', 'xjpvkpa',\n 'ynunjle', 'bqavt', 'nkpqudr', 'neikvd', 'yuqlzg', 'pdxbtrb',\n 'cashlog', 'iqiqy', 'smjmxv', 'zbtpbr', 'zzamzcv', 'jmakg',\n 'txfswc', 'pkaym', 'swlde', 'utann', 'mqgpjne', 'pslfvek', 'nbiqhb',\n 'bzsianu', 'wnxgbi', 'ahkeeiz', 'dqdfjg', 'bptdg', 'pwita',\n 'uqyflq', 'txabjn', 'yznjmve', 'mukcqqf', 'cxonbf', 'ixuewjm',\n 'pzlcat', 'eikeeo', 'scwsoa', 'uaeyw', 'oeorff', 'gbqgd', 'qboqiv',\n 'hiulpb', 'dbbdm', 'qvdxx', 'aypxbcn', 'ykjwdbg', 'pvfxn', 'shrqyz',\n 'zaxtu', 'pfefgww', 'jwifrw', 'zxuud', 'kpkwhlj', 'lwptgd',\n 'zpdmvsw', 'takeb', 'ynehl', 'kixtod', 'fyrgm', 'qirzmr', 'shyvec',\n 'xjgzt', 'bwfvht', 'wyehh', 'renzc', 'nnibax', 'slhfng', 'yjtecc',\n 'lghvbzf', 'qroxvun', 'mlsed', 'rrudho', 'cyffhh', 'tjlxahp',\n 'xmaepzk', 'jvdzh', 'bbvegrw', 'cebcz', 'odjpeam', 'guerph',\n 'tgmphgo', 'ohtkqq', 'jcxojz', 'haeheae', 'erydxni', 'hatjxx',\n 'kwmgkjw', 'wmezvy', 'hsuuvfi', 'ineek', 'grkxmhb', 'alxkt', 'rmspxdg']\n ) == 13956\n assert s.minimumLengthEncoding(['me', 'time']) == 5\n assert s.minimumLengthEncoding(['yiyqbv', 'njqvawn', 'wnlovvp', 'vogum',\n 'jpolc', 'zleec', 'sxdrww', 'rbowr', 'xsjorra', 'kwjsx', 'vornum',\n 'echku', 'kuizegn', 'rhuvv', 'eemkh', 'yshht', 'pbixoa', 'cmbxvtr',\n 'iupia', 'nmcbq', 'mgrjsx', 'ejvniwt', 'svhsel', 'kazenhf', 'fevpm',\n 'xcwqfgw', 'ozikzc', 'mywnmqt', 'taorwjm', 'gcshacq', 'fgtasq',\n 'qexygw', 'ljmbari', 'zfjudos', 'rgxuzy', 'kmzryaf', 'exjfd',\n 'mcqnebz', 'ptoim', 'zglfi', 'fhneaz', 'rexgc', 'lhplwyr', 'dthdp',\n 'jizetec', 'obyzg', 'rqupa', 'yphttge', 'wdcdn', 'wdomtr', 'hchbd',\n 
'ytyra', 'upytftl', 'swbbi', 'qpcybv', 'dcoxspd', 'dftkf', 'nwjfmj',\n 'ojbwy', 'zofuy', 'adqkt', 'kpcply', 'aeukw', 'fqblb', 'xurrbpo',\n 'veioa', 'puzvl', 'bnzvlax', 'tjzsdcw', 'jarqr', 'orxjbg',\n 'ilrqdri', 'syjuoyi', 'htoqdco', 'gwslw', 'dpqyf', 'jnkhv',\n 'fpqhpr', 'baewnvc', 'caunsf', 'qhbpe', 'wlckl', 'lmoroqe', 'ddlak',\n 'qipwbfp', 'cefqs', 'surczp', 'jtmfuro', 'ezhqau', 'dlsco',\n 'hywoqh', 'lnifq', 'hvfmu', 'cqjdkok', 'tggdact', 'rwuowdk',\n 'attnl', 'lwhyq', 'mqtsc', 'bmwajiy', 'nyohug', 'vvfpt', 'lbyazu',\n 'sarwago', 'iccztck', 'ugsxcw', 'rpwza', 'yofmlll', 'ulhdzhg',\n 'lbaqk', 'bwxxwc', 'dmsbawg', 'tjloy', 'imbrkul', 'xguke', 'shlkuq',\n 'lizjcdu', 'kmvykl', 'ilqxxjm', 'rtbvvqt', 'qisec', 'zobzr',\n 'thwntt', 'afpifh', 'uwiiovy', 'hgsyecl', 'pdgnm', 'mqyesch',\n 'suexztu', 'msguuwu', 'yrykkv', 'xtoommc', 'muteu', 'bamml',\n 'kkhlb', 'jfrnx', 'wpytor', 'zzogpt', 'yryxxt', 'hzqofjd',\n 'ehtildc', 'ptclf', 'nyltvd', 'nrret', 'qqqqt', 'uuxunf', 'jajxt',\n 'lzdvlc', 'gpdtjug', 'hjsso', 'jairua', 'qarxuey', 'rpwwjwv',\n 'cjqypep', 'tuzgcs', 'oytqxb', 'rgfmud', 'stnwn', 'tzzaop',\n 'jpuopzg', 'qeywd', 'spnstrg', 'dfwgntg', 'yjyqk', 'ioowc', 'duqfg',\n 'gmqxe', 'xhlbby', 'liurjk', 'vdujfm', 'xxyyn', 'omapgc', 'koemzbz',\n 'ziiyako', 'pjmhfrv', 'bshtfgj', 'ihjvt', 'pnipuw', 'fajiuj',\n 'rdvcqzd', 'mgknns', 'ouwkm', 'ejnklwc', 'osepl', 'gplpyvs',\n 'paxrddg', 'gsjlpd', 'lgnmgl', 'yifeeer', 'hhnwlol', 'fcmxs',\n 'ilinwgm', 'udhfdtq', 'ceefc', 'xweqx', 'jfelwod', 'rtywfjo',\n 'kzwrgqx', 'fcjriov', 'fzytqv', 'zcpcddo', 'scpyzow', 'kbzegu',\n 'gclwr', 'gmiwlp', 'rtpka', 'yiywuyy', 'qceot', 'dtrgn', 'ntwbu',\n 'fxobd', 'zmxwza', 'qcksyz', 'wgbtmm', 'pzorve', 'hztydc', 'jqlay',\n 'ijdkbk', 'uzjrps', 'gfzibk', 'gsxqj', 'kgjrkdd', 'smdeuk',\n 'iwizewp', 'owjie', 'kcdccu', 'ifltqr', 'zrdfbm', 'pznbcsk',\n 'mtkpi', 'cpasir', 'flrxrm', 'uxcxnv', 'htlfcp', 'ltukxfr',\n 'ftbbha', 'jhgjgyz', 'qjreroc', 'vcvtbid', 'nrhlq', 'gtkpot',\n 'gyplqqg', 'lnorig', 'fixhufv', 'ugcug', 'ndfug', 'wuorhe',\n 'owocnkw', 'rcnbf', 'ioiiiui', 'kakwtne', 'svxtt', 'wdrxogm',\n 'ibrxs', 'bddqi', 'jeguac', 'hlftdw', 'nutgfjw', 'krrzvf',\n 'amxuloc', 'deozdoe', 'ovsvk', 'sfqsl', 'slgiw', 'jbjujag', 'mhiru',\n 'uqksech', 'davosw', 'nlueljv', 'rhtvdu', 'ivdpdqa', 'qnbenpq',\n 'dtapqq', 'hwwfpxl', 'oyrfosn', 'goxgmgo', 'tbvutl', 'cbbbcm',\n 'iiugpk', 'hinkem', 'vvaitk', 'pskyf', 'hdnekg', 'nqhfn', 'dqbozx',\n 'zcwpko', 'kafyu', 'jfegubk', 'nofqzsk', 'ujmxxg', 'akwzemu',\n 'yvhxb', 'qqlwofi', 'hmoecj', 'qwgtlc', 'jepvygq', 'uzggm',\n 'fztiews', 'lvndvf', 'vulax', 'znqudh', 'whgqi', 'noguo', 'vewkx',\n 'uruvgf', 'ubohmba', 'aulzi', 'flvfdlq', 'yspfie', 'wugif',\n 'qndyiwa', 'keihmct', 'rggvn', 'ojjmuoh', 'sbbcl', 'cdivmoz',\n 'vkusmp', 'mfddp', 'kgohwvp', 'rjbbxw', 'vsgptj', 'hbyjoz', 'gufrv',\n 'orxiv', 'fxcqfw', 'okppik', 'qlouw', 'lkryigo', 'qccvc', 'ixcnodg',\n 'wlfilts', 'ahqtevp', 'kkbuha', 'oehaez', 'rzczib', 'vxobk',\n 'wmetvjs', 'xfjgeq', 'eadzl', 'aeqdvch', 'czojfq', 'hxshidl',\n 'ofswsj', 'iwbqcmg', 'schhwtt', 'ltyth', 'wiccu', 'akill', 'zaaji',\n 'qepvfa', 'mpvrkeu', 'dcpenm', 'wdhlk', 'llqbby', 'lronwkr',\n 'rwtguo', 'ofnvs', 'lxdnwzf', 'dctmilf', 'zhckjd', 'hajsuac',\n 'wpylhy', 'zhipvm', 'ihikr', 'zzwjgvr', 'gdglrn', 'skhow', 'tlqtjl',\n 'uypli', 'evdva', 'civide', 'iroihm', 'lvuzid', 'vexat', 'ngmvrz',\n 'szdhbt', 'ggrbz', 'bsmovlt', 'kguomvl', 'onzvx', 'nobgxw',\n 'tqxemc', 'vbiyx', 'fpzpf', 'ogtvf', 'yuthri', 'xszbn', 'xcuhj',\n 'nosnpbp', 'mowsxg', 'tfalyy', 'kxombgm', 'cukrz', 'krmseq',\n 'velzh', 
'kmufxj', 'nvxlkq', 'ualvras', 'wytoucy', 'qicqyym',\n 'pbeujtv', 'haojnbm', 'xnfffpe', 'wvoiald', 'rlyvf', 'sxamoxw',\n 'ztqnmp', 'biiavx', 'lnjnzs', 'arqdjdy', 'pkrgokc', 'qxswouj',\n 'dgqah', 'mnhzo', 'ggilb', 'qscrd', 'ggvkimw', 'qlxjys', 'wximi',\n 'aqlhio', 'iavtvy', 'grkqf', 'dwrtut', 'uozutfc', 'fogxpdb',\n 'ydtntlq', 'vnmpmwp', 'gtxhwq', 'mlpihx', 'yfpjlz', 'hdvcquq',\n 'nunny', 'wklasgp', 'wxduo', 'topsqf', 'tngcpzc', 'mcrut', 'pdnsmt',\n 'kavaok', 'seiqsqa', 'bhgkiyt', 'mawvhtp', 'domcnrm', 'fgusghc',\n 'wdaufwz', 'tzpuks', 'kisndyz', 'fwyieu', 'wtdum', 'ytxhl',\n 'yhzkmuv', 'nppnqe', 'ccvhj', 'dautnyq', 'hkaliab', 'kngan',\n 'ebmhiop', 'vsdkcef', 'nmpcnd', 'vxvnl', 'cwcgu', 'zsuneh',\n 'qjgcmd', 'awvba', 'rzbisxo', 'oilqrj', 'neiazlm', 'hlyrl', 'tmiht',\n 'lwqxxv', 'gyblrw', 'gnnjkb', 'lrxiln', 'xlwlseh', 'npfwcvp',\n 'yjcdhw', 'rzndd', 'orlhmip', 'gatuojh', 'osotgvv', 'owksz',\n 'kcocizf', 'izlev', 'smigns', 'wtxfwo', 'knwizte', 'mqjojzp',\n 'lkezye', 'xqldbu', 'cvbpyl', 'aoipbz', 'asrupt', 'bdwkesh',\n 'jpaykm', 'pksbg', 'gdbsibd', 'lfxpwk', 'rmnfph', 'yzxwke',\n 'xjwyusv', 'yetar', 'sytdz', 'pnystzi', 'yntcqo', 'egoorl', 'aydxu',\n 'rfdrfhe', 'flzkos', 'mmjgev', 'fbjwmvi', 'jeouc', 'lcmkri',\n 'aggsb', 'aaeazai', 'amyxpey', 'onxqpg', 'qrjpxq', 'zanea',\n 'niwsgtv', 'nsqja', 'utgskd', 'hlcum', 'frygtl', 'xjmqetz',\n 'upqddd', 'vxzdstm', 'hcmtera', 'ejstou', 'xkcguf', 'bokigdk',\n 'vurnv', 'zsgrje', 'nbxlf', 'tpilcx', 'lvepux', 'xacdtp', 'amdgx',\n 'ubbvnx', 'xmvznh', 'tlprri', 'sthkn', 'xhoad', 'deotaxo',\n 'pqzppmw', 'xlcpx', 'qwzrpyp', 'lujabeb', 'heskwyy', 'mzzaaur',\n 'vnestcs', 'rryphdl', 'ibdiabi', 'eoiyt', 'znflx', 'clougix',\n 'zzadxw', 'lrrgtf', 'lsdoakf', 'yxfmqx', 'qhnrry', 'ktcdmv',\n 'veygqu', 'btjlo', 'fcspsc', 'gozoazm', 'xcsqgz', 'aazae',\n 'nkuvask', 'mzdgjq', 'sihqdhy', 'zadrwzw', 'gzcyuea', 'lpgccic',\n 'fqtfuzw', 'bjoqpkc', 'oydpkxc', 'sugnnu', 'hyvygf', 'axkxo',\n 'rsmzb', 'dlhqmac', 'gbqby', 'npqkj', 'odbtb', 'bdsib', 'zyasxv',\n 'ifxqcc', 'lmnjwhr', 'ibuyu', 'uzhle', 'ccpwhjr', 'vhrojnz',\n 'fkzfz', 'fyesm', 'dnvipvm', 'jbbqn', 'qdkgl', 'xkvvgq', 'dphugaf',\n 'soxbfun', 'rbgokx', 'biveiz', 'vbaqtn', 'qapydgf', 'llldu',\n 'ottjpzu', 'fwjuc', 'cawio', 'gbkwe', 'rrnnxer', 'luviy', 'zsalse',\n 'ckwdeox', 'ozhqocm', 'vtozfwz', 'jztole', 'ydqei', 'bfugz',\n 'psawjp', 'dzlyrwp', 'izuyrne', 'rbwcfr', 'vdvte', 'usjbqs',\n 'zzovkxr', 'frfkwk', 'mmtmdd', 'sntka', 'wachbzo', 'rmzvj',\n 'scbngo', 'eqiuiwi', 'qfakk', 'cckcmt', 'owhzow', 'rejdlw',\n 'iprsqdq', 'twwaldw', 'mfilzyk', 'jygvx', 'iewbo', 'irhko',\n 'zpazqhn', 'ndqbg', 'ayzxqdz', 'zvpbh', 'maapq', 'pzitrfm',\n 'qsgsurv', 'viwcfff', 'wpgenms', 'tjmvu', 'czuemc', 'infxoo',\n 'avhbw', 'nugkqx', 'xubakjp', 'ndask', 'utaqq', 'njhuxq', 'sdvuex',\n 'tfmxqp', 'bydovjo', 'bizxjsp', 'zoozxyv', 'jegei', 'gkpqobw',\n 'psumbtg', 'gkgoh', 'sgcbpql', 'xxkhy', 'kdorkr', 'hcomj', 'ulrpyv',\n 'rhplil', 'tyyochd', 'xhzul', 'srdjmns', 'kgukye', 'yepvs',\n 'xnobsjb', 'umxmtub', 'wvqasr', 'igftpzw', 'exhecn', 'rreee',\n 'jpxuvxh', 'jriqf', 'akexunb', 'ekvdsoe', 'ytzvj', 'vfrlyae',\n 'pmfai', 'biouzle', 'xkbce', 'clzyi', 'xhjoso', 'wmxkxb', 'dqzzig',\n 'ydtby', 'gskwj', 'wlkwbz', 'zepvllz', 'zsgqp', 'blntawk', 'eynmil',\n 'bdqyp', 'wgtnqbc', 'rrgaq', 'gtafuzo', 'qdiko', 'kkcsdo', 'zwqhs',\n 'kugzbmf', 'wtvvs', 'kqsdx', 'mxsuxiz', 'pgbgjfe', 'vodfr', 'qbvwu',\n 'vfwbhgw', 'ayojye', 'kolzfqg', 'xnbecj', 'akbcnf', 'uutrn',\n 'upmesa', 'marqej', 'bbucee', 'bazqbau', 'qikgsyf', 'oeayzn',\n 'uilxnzr', 'vpnxknl', 'btgtxgh', 'vjaav', 
'zaxtzah', 'msweps',\n 'awduwld', 'gzaep', 'ngvgc', 'qpoqdgn', 'kimndg', 'qilmmpw',\n 'oafhlyp', 'nyelgvw', 'onymk', 'feycbc', 'dhcrx', 'siqpfly',\n 'tyvycmf', 'huctqp', 'uscjrp', 'bbptd', 'msdmu', 'xlxhye',\n 'xnyzcox', 'kyskda', 'injdkmp', 'jiwus', 'spjylwd', 'eqcrnt',\n 'snfiu', 'jvwvge', 'yfeaw', 'mmdnsjj', 'suzdw', 'xiupf', 'rjwjhng',\n 'tqvasy', 'rmibpa', 'zuqax', 'prpndnp', 'efryqe', 'pwuqfy',\n 'wpqlfs', 'aeswq', 'cxkeiue', 'jydxzfi', 'tzfvwp', 'zzgtw',\n 'mupiusx', 'sojavt', 'dxmsgq', 'migjiyj', 'kixjk', 'ywwvcpl',\n 'khzcuo', 'oykhx', 'fochin', 'foxbfkc', 'sizjg', 'wrjcvr', 'ceadd',\n 'tvfqgxq', 'whzhche', 'dcoeti', 'mpilfib', 'cphie', 'ucpnjm',\n 'ajltvx', 'kpizym', 'vevfsrs', 'jznrri', 'yvhxomr', 'cbcnk',\n 'yuwuhu', 'jywuzed', 'kqakusq', 'jrnzgfo', 'mjimzz', 'mfjybnd',\n 'ntqyq', 'junxxck', 'myvqajv', 'kvuqs', 'obfxw', 'jwuba', 'vnrvzvy',\n 'aeric', 'vtgda', 'nkrocpt', 'ahitg', 'dzxtr', 'zswwc', 'yhxap',\n 'fdhiwr', 'cpxtqv', 'izbmo', 'zyioo', 'vysnoe', 'ouuyvj', 'cumdhzn',\n 'dbsmph', 'cktjem', 'vbmxy', 'utgfyhc', 'rqdeorp', 'btnlmd',\n 'chxwlt', 'nsghoqi', 'egycsm', 'wkanat', 'lzjyf', 'donyx', 'cchqsa',\n 'xozzz', 'yzmnf', 'jfzuh', 'dpcpg', 'hlahz', 'vobopk', 'lssfeli',\n 'ccttzi', 'glzgqpv', 'oyqzug', 'qqhkrr', 'euwotv', 'hwbmtz',\n 'hiylhly', 'bppzne', 'yetyyvs', 'cnbwcby', 'hzblk', 'pfjmxt',\n 'dsxvt', 'vvkju', 'zjrfr', 'gdbhb', 'udoad', 'nbhpzfm', 'iwetbym',\n 'atmly', 'tnxli', 'myegb', 'hiwqsk', 'btrajk', 'nhrmwn', 'ftmbecv',\n 'xopht', 'eiikqy', 'qizanwa', 'cwxiatf', 'jshjva', 'llrtkn',\n 'zhivu', 'lmwiu', 'oaeaqz', 'oxotfub', 'jnkafm', 'juhrmq', 'mqzbtw',\n 'puiaxty', 'dnahvoj', 'gaxhz', 'xfnay', 'iqmlnlq', 'xudhcg',\n 'izpkz', 'tqttmt', 'bwnbs', 'fdufd', 'vhzyymh', 'zhqtxr', 'evbcrv',\n 'xvnma', 'dgcwy', 'cwxzlbz', 'oodiol', 'teyim', 'kqqfjub', 'ftsqzi',\n 'arfztkr', 'oqlujx', 'rpkkdov', 'ptoff', 'ivxaxr', 'nxeept',\n 'cacpl', 'tehir', 'spvggl', 'qfzxkn', 'bhwkukx', 'fkdpuq',\n 'xdrngre', 'fnfplq', 'dzbrl', 'ufgxu', 'sciec', 'fgdydvw',\n 'nmpaqxi', 'ydsvfv', 'natjz', 'lruyvzf', 'xznznxp', 'mhfrh',\n 'kddsk', 'uwatn', 'uklzs', 'lnuta', 'ryizc', 'cvwko', 'tnzpk',\n 'ywpiv', 'vbvcagq', 'pzolw', 'nmyfhg', 'cshkofj', 'ksptw', 'kqejh',\n 'zgzjqzo', 'mxzrw', 'enabosq', 'vmubgc', 'sfzcj', 'hewvk', 'ewhrq',\n 'oifnsmi', 'izdnvu', 'cshgtk', 'mqotuhd', 'gnqgj', 'rxailbm',\n 'iyhxvtu', 'ncjzklq', 'zjmnoc', 'awqwos', 'ugujppc', 'spbvfwl',\n 'gntsvo', 'euksu', 'qnvneph', 'crhmf', 'brktmf', 'mvgmr', 'yzcskrp',\n 'tihawec', 'edqmxpn', 'fxyymlr', 'dzfkucm', 'prldz', 'gplrlhz',\n 'bohwr', 'bhebbk', 'mmecj', 'segydd', 'ptslsb', 'pyhgw', 'cwmrq',\n 'mjfhflh', 'xhuid', 'npxmb', 'izilq', 'dczhqh', 'tgfnxtb', 'zrylvo',\n 'lctxrar', 'ylhrbii', 'rfxedv', 'llvhzjq', 'bjocv', 'wbnex',\n 'cnohnf', 'xahrl', 'rouvwyc', 'hbhovgv', 'dhucp', 'ncmff', 'ncsskg',\n 'gsjbyin', 'lroxscf', 'whfaenl', 'vsfultg', 'floxkpy', 'captoai',\n 'qwolyex', 'ggaypn', 'wzunypd', 'pjixeu', 'gxnjkoc', 'pqiqhn',\n 'xakjmgz', 'vqizkx', 'gdzcxr', 'kyxwdd', 'pgxmazn', 'qeuwf',\n 'bduknm', 'tcrcn', 'nehgee', 'wktbcgu', 'jwqltdt', 'wczkai',\n 'drkqs', 'qhdqnn', 'oobxirc', 'lbunv', 'ifscr', 'xnfpbrw',\n 'yrrdbax', 'fbocs', 'tewne', 'iobixe', 'zgosas', 'yhesn', 'xlqwd',\n 'pfcen', 'slsjffx', 'ilwatrc', 'mhsmgp', 'iteghl', 'aqhufdl',\n 'kxgpqcu', 'ryrcgp', 'azidf', 'smlnl', 'rocxvbt', 'iutfc',\n 'loapgbr', 'musulp', 'dqcnj', 'tpgbkfh', 'wvskii', 'itkfopo',\n 'kytyb', 'rzahbu', 'aewptd', 'ohergbb', 'cadxh', 'aphwelj',\n 'huooyzn', 'gtttia', 'izeyhcr', 'cfvxz', 'aitaxyp', 'vypqost',\n 'ebfnmif', 'kgiucm', 'zryyu', 'oxgnbpt', 
'frpwo', 'ouqvodl',\n 'pdaazh', 'gxwmf', 'dozxsjm', 'yndpsik', 'zcwvu', 'mihug',\n 'jgodklw', 'ysklw', 'cfxqv', 'yqvtz', 'rctnp', 'xjywa', 'kpqyw',\n 'hhtegzt', 'rnwbeoi', 'uyxqum', 'jahcwbe', 'jzjns', 'ovwoaz',\n 'oqmsrua', 'natbejl', 'deffv', 'okgbr', 'paqhy', 'jkafhte',\n 'lifsknp', 'afmskh', 'oemdro', 'oxuwov', 'qtyxa', 'hkpfsm',\n 'ulaubn', 'tciurw', 'myohwlo', 'okuiejb', 'ormoqsb', 'gmipz',\n 'hterzir', 'ekxzre', 'xkevge', 'ihenf', 'nnhzv', 'eocjmx', 'upzal',\n 'oounfko', 'myhbwub', 'fwipva', 'pkzzvpd', 'nrupm', 'vluzq',\n 'fxkoyho', 'atzktr', 'aomrp', 'qwpser', 'ejagmb', 'cfigelm',\n 'bvanb', 'cgcgabo', 'hmjvlqt', 'hxxocf', 'ftqaud', 'htuipy',\n 'bhwmcn', 'tgyvaqe', 'lvuwh', 'yiabzs', 'rzzavu', 'fiubm', 'uuqsb',\n 'riyakuf', 'psscffd', 'kvckzr', 'fktmnf', 'ivzqexi', 'nhxzm',\n 'kffjmb', 'vdzxv', 'esago', 'bfikw', 'gaiuxmz', 'volokcm', 'jypcs',\n 'psibvs', 'hxaxklf', 'lmqwgy', 'spnbimo', 'mtihak', 'xikoiy',\n 'rmmtv', 'phaqgxj', 'zcuwkhk', 'emodbyb', 'ztahsya', 'ieiqm',\n 'lfoquh', 'emznnq', 'pnhlgut', 'pgvads', 'cqsjx', 'lxnjei', 'zpque',\n 'rdjbiyb', 'sxedpu', 'potnqva', 'iirkn', 'rjmnrxd', 'ksgcd',\n 'waeymnh', 'tizdz', 'kproa', 'wpttygd', 'lvyze', 'peewvgm',\n 'fwtyzbw', 'zitkk', 'gfgqr', 'udgvlz', 'swqspo', 'ohhvyq', 'kgyuau',\n 'hcerp', 'pdomlm', 'twabkk', 'zfsea', 'epiwp', 'xgycjpt', 'jtkdh',\n 'mxmdm', 'rtkzm', 'qkacy', 'nuvdiq', 'agctak', 'hypgyh', 'ewtjp',\n 'paysolw', 'bcutebe', 'xelxyb', 'gzdvrth', 'vpzfv', 'cxrkt',\n 'admiyzi', 'lqlmn', 'zbjpbg', 'tlvdnli', 'zetnox', 'ylcsobo',\n 'balajod', 'igoume', 'sxcgw', 'sbkkafk', 'fmndnnw', 'incsa',\n 'jyupkg', 'uhvvc', 'rswnbth', 'nvprfj', 'figqf', 'znyidqi',\n 'aijper', 'euidr', 'dftxkze', 'vnppi', 'splwifc', 'fprgafl',\n 'ixzaz', 'mrhqtne', 'dtkjsy', 'dsmqrgy', 'xfscz', 'cymvmpu',\n 'vptkfdx', 'zrgrjq', 'mqvwsur', 'hdtlw', 'ugdpwun', 'cvxitc',\n 'vytvqg', 'pmtpfz', 'nfdtdt', 'umvwjuc', 'jouxc', 'qpypri', 'pdhqp',\n 'lmise', 'wlsvcfg', 'aqdkzcb', 'qlrmrfz', 'pbgoyi', 'xmsskoh',\n 'jjdye', 'xvsdmq', 'ymjeipy', 'igjyv', 'uiojvmc', 'uckoww',\n 'grlnyeg', 'hpglp', 'omnnyy', 'iiliir', 'cnucbcx', 'pcxvs', 'hipad',\n 'xmiltkj', 'oorwi', 'qgoxjj', 'jnmviqs', 'wpleqn', 'tudxw',\n 'pcogem', 'hgewaf', 'niwfexy', 'vcttgcb', 'anjgovq', 'epgmscd',\n 'mdtru', 'xvapv', 'rydjik', 'kopppcr', 'mjbsmu', 'unxoakz', 'ldpsw',\n 'frksjr', 'vyxxg', 'yyydri', 'szidq', 'qvbtd', 'qratl', 'xwfov',\n 'bzhqyxl', 'fskrtf', 'pcpzmnv', 'xuxwx', 'vzbevnb', 'ebaqz',\n 'dbpuek', 'ooqwj', 'gaimp', 'coelqh', 'bwuceq', 'oxpfjt', 'zrqyc',\n 'rwllk', 'pqunv', 'ufbnn', 'tbnjoz', 'kkqmrxu', 'qyyrm', 'hislf',\n 'wyuck', 'ubpre', 'pdioi', 'aryhv', 'vdcxv', 'rkgmaag', 'czlzokw',\n 'gtxuduz', 'grpijx', 'qzrar', 'qhues', 'rmznt', 'sxxmved',\n 'onjzuwl', 'atbjhip', 'nrardl', 'alrocy', 'cfkip', 'ihtbf', 'pqdgm',\n 'hmokun', 'dpghac', 'otwml', 'mnbzwa', 'ehetlt', 'rchvq', 'lwjgywn',\n 'lzdmjo', 'nvhohdp', 'tmshcpc', 'gavjv', 'ycnkv', 'uynzh',\n 'bvpnfjq', 'lfbem', 'qberui', 'vrmmhx', 'wpbqtfq', 'jujpx',\n 'dujgkof', 'hrpbso', 'zhcdt', 'iybngyb', 'rgeruza', 'nesyxr',\n 'cihgfe', 'hjgskb', 'zspxeqm', 'inzrgyd', 'crkjq', 'iooshwp',\n 'muvvj', 'wakis', 'rowibwa', 'qikwypf', 'aportho', 'pubcgx',\n 'vqoqpfi', 'rnpbri', 'ussjv', 'looor', 'xkzvdv', 'tstegg',\n 'zgiiokw', 'rwvyaun', 'mqqla', 'asnqp', 'nghuryl', 'hlvhn',\n 'ecuotnu', 'judvbu', 'xgvuw', 'oeckn', 'hdhttsg', 'hcyhu', 'klbyjc',\n 'tnrmqnc', 'mjojxhi', 'kvdet', 'vbmevim', 'oglrzs', 'afbscdi',\n 'zxrffti', 'firzgmz', 'oenim', 'wgpua', 'asiep', 'kyteq', 'wpeneca',\n 'qixmeoq', 'zaofon', 'csxxtr', 'cpwmnl', 'feylas', 'idjuo',\n 
'mrtpvta', 'jjvmjy', 'mnljocc', 'lnvjleq', 'oognud', 'rbyneq',\n 'rhvomm', 'fldrkpk', 'znvrp', 'myswmz', 'jiloe', 'juivjmo',\n 'ylhbyzl', 'ndmabkt', 'sgdvlq', 'pmnddmi', 'utpuj', 'kfisv',\n 'nxfeell', 'mxhgqd', 'ccvdsdg', 'emtybo', 'zmkylbt', 'mmrpi',\n 'dkwlgq', 'iwlappb', 'uimsrnu', 'mkxaxmi', 'tcvll', 'njggal',\n 'kmqud', 'evgzlh', 'oaxizbp', 'jiuej', 'xknlp', 'cyksydh', 'gbixmz',\n 'vtouyk', 'sxjpkio', 'qhubt', 'kflvnb', 'sjdfggl', 'bxozyj',\n 'xekbh', 'wtmcb', 'xtapfco', 'rnornl', 'ursdpki', 'waonim',\n 'eibfyed', 'zniinaz', 'uyfohq', 'qcaxlt', 'koyaapa', 'pjuvbsi',\n 'ecpdl', 'ifaqwm', 'yyumzc', 'gvfngfp', 'lttul', 'flyza', 'uasdlme',\n 'oklhb', 'wulkzzv', 'ziwsxo', 'jqcxiu', 'qdzrwgm', 'zjdwy', 'uumns',\n 'emlnp', 'irnrqp', 'gqkza', 'oynpcz', 'yxyea', 'zpamf', 'gyehxbv',\n 'nplkhcc', 'rxeekyo', 'kecgp', 'gseju', 'nkisxqf', 'vlyud',\n 'fxxihhm', 'yjgtml', 'fehwpdi', 'wclnvyy', 'lriwrc', 'ikparv',\n 'volfh', 'ysphh', 'szrvrv', 'rqlmz', 'jyqut', 'fyftsj', 'uvwfip',\n 'rngwgm', 'mjwaz', 'roehjki', 'ploxokr', 'yjbalp', 'fspkq', 'yfxrb',\n 'kzulvk', 'ordxp', 'vdrrt', 'wdiojwd', 'ridzl', 'niykdvu',\n 'whyycmn', 'riwcma', 'bkhgkrb', 'nsine', 'emgtgf', 'zoymw',\n 'ljtvhzb', 'kfyfdma', 'piygxdl', 'onfwgdf', 'fwmkm', 'vqbljay',\n 'icife', 'bxfli', 'yeygr', 'qenhgm', 'mtxuckj', 'kdcyx', 'kwqhfcn',\n 'ywkfy', 'prbpw', 'pheyc', 'kmnds', 'cacqs', 'kvekiqy', 'bfvfhdy',\n 'gxulp', 'skmcra', 'exomt', 'lcxue', 'mnvvday', 'rsddl', 'gooegc',\n 'udght', 'doymnin', 'ccdap', 'wuive', 'dyyln', 'rynust', 'luxabyg',\n 'kdkkyyw', 'vawqfsy', 'rmeswm', 'rcxzyv', 'clpowz', 'pdntqm',\n 'tvjkkmz', 'iiclw', 'nhudzen', 'cybhu', 'crwtw', 'enypnh', 'ygekg',\n 'hrjwqt', 'peissge', 'wangcy', 'rbpoik', 'raqulbf', 'gyisnsj',\n 'rgbqn', 'lgvuzb', 'djicf', 'epnuu', 'nsapc', 'voatgh', 'yorfehc',\n 'jxfttat', 'wyuivb', 'bwopl', 'odwdsh', 'anchkv', 'sepvew',\n 'qoxxmae', 'bpvqnj', 'sngfo', 'buoazou', 'zhijssa', 'janng',\n 'uvdbd', 'yfvkqo', 'lcjii', 'mvacvrz', 'xztiar', 'lpbtrqa',\n 'ukbpdx', 'okaqpgr', 'idgqlj', 'ewglgo', 'ruymhi', 'pcidw', 'bvuqj',\n 'npzch', 'yppyan', 'oiguirj', 'iijvwqj', 'jvbwjys', 'yjtunfc',\n 'iaikra', 'oduhdgk', 'ivixur', 'ibcgai', 'djzvcbx', 'lmtsul',\n 'lgnwzol', 'wursq', 'xsxbqwq', 'jqvwnc', 'dcwwvtb', 'vwybnr',\n 'bughwjl', 'rnelxb', 'hmacv', 'ufgdygl', 'aabuat', 'oynwask',\n 'gnfjjf', 'zipbq', 'zxstn', 'jdrbprf', 'jmkvny', 'rblpql', 'vykdj',\n 'qaakyqw', 'osbhddb', 'avgldyy', 'kvpoa', 'fnqcliu', 'zzlninw',\n 'drsal', 'omswys', 'hwqcpct', 'ecraq', 'fvhsbjq', 'raauy', 'pfmoz',\n 'vvqvcm', 'tbjqjun', 'jcfbegq', 'otiwup', 'axvvce', 'dhpdnx',\n 'pennr', 'hvvmvzv', 'binezl', 'ygdmcuo', 'ypwnqn', 'aloxdv',\n 'ucieh', 'kovbtag', 'rgfpaww', 'fpbftg', 'spjowfr', 'zridoy',\n 'blwbbf', 'evwlxi', 'itbcz', 'hgixuo', 'qmoqmjb', 'tkeeis', 'pjiaq',\n 'rbpje', 'ledoui', 'ubecht', 'mphdd', 'uzswsbb', 'ntsybr',\n 'qmnijyp', 'pqwawe', 'ltytill', 'dpnxy', 'pkxqcol', 'ayrdi',\n 'mycnd', 'knotsn', 'zvcrjl', 'qwroblg', 'vtrktey', 'dzilezi',\n 'wzkxg', 'varqc', 'xlpttyc', 'xxqhnl', 'jpxywa', 'kjdsh', 'hdseebw',\n 'bxqbp', 'flazqce', 'xrtab', 'rupsfq', 'asswer', 'rhqof', 'hjzdv',\n 'addsgax', 'cuahzjj', 'xwdilr', 'osqgg', 'pfhwv', 'rqorah',\n 'ggdlnv', 'truvaoj', 'jzuldwf', 'mjddj', 'vixtn', 'eslxoaj',\n 'cmoypm', 'jvvzs', 'oqgxcc', 'tptls', 'wwgwbj', 'tysuhg', 'xbnqb',\n 'iogjvg', 'fbxdmr', 'zdvsmx', 'hiuja', 'watrt', 'kjawab', 'entxk',\n 'jmnkaox', 'zznsox', 'asmzc', 'soblvp', 'quyxjw', 'udrdc',\n 'hyylvvw', 'gzfwxuv', 'jjqmjw', 'faegxbl', 'lqjcg', 'bzmruq',\n 'bykuh', 'miwhd', 'ykgtwhk', 'oyobzwi', 'oltwpua', 'ctulabr',\n 
'dwandd', 'vhuhox', 'vtlknw', 'ywvln', 'qemqdeg', 'akezvx',\n 'kjmjpv', 'vwuftx', 'kreaxnj', 'fvfop', 'cxabs', 'jfacbje', 'eecnz',\n 'cmblit', 'gfvpoq', 'whywnh', 'pghvx', 'ohgkmf', 'xxtiwd', 'nkojni',\n 'dlcicnp', 'bwyvyyd', 'gifup', 'vgjfr', 'hhteifi', 'kjhffq',\n 'pawqaxl', 'yozro', 'slxluvd', 'amqcquy', 'vnnxkr', 'wgdur',\n 'rvawiu', 'thcwnc', 'cddut', 'vnrtrv', 'fnfio', 'nhvxe', 'rfdqmj',\n 'ucblh', 'ccbnt', 'lxckaoy', 'fnwcbx', 'gmdbiwt', 'ypvwjy',\n 'cbjazk', 'qmujnm', 'nsqot', 'lhcqt', 'ijxcts', 'nujrms', 'itxel',\n 'ghukr', 'qpwitlr', 'gcafqrn', 'lcoho', 'lfzab', 'vwhgceb', 'vgsgy',\n 'jrtgo', 'ryxlz', 'deoyq', 'ybenly', 'lyysca', 'sodvazo', 'hbnnoz',\n 'ovgvda', 'elwtjx', 'soydmn', 'trdsi', 'mwwjwo', 'vupwj', 'dszpcv',\n 'kkhjdj', 'ewmyo', 'nmpeq', 'oepldcq', 'xttrgu', 'wbcbxi', 'jakzk',\n 'peukyw', 'fvcqv', 'xklwuu', 'hsmva', 'kslmkq', 'azllbig', 'stnzih',\n 'wfyud', 'ihauy', 'cfxmj', 'pdyogwv', 'dcqdpa', 'xhusy', 'jfpmpmm',\n 'odeiiw', 'ozyaer', 'uykzvma', 'tuaznxj', 'kdnbdki', 'syrnsem',\n 'fdysz', 'hhrpo', 'fglzfi', 'vgcqzqm', 'qhsjr', 'bvboe', 'dpfwpvg',\n 'mvvry', 'itnnr', 'lgykbe', 'pscow', 'mkrgeqv', 'czffv', 'apteht',\n 'jeqixsx', 'ksmbe', 'zamivv', 'vvmyo', 'cwwoce', 'sppubxc', 'qaich',\n 'nmbxr', 'tfkwfxi', 'iakhezl', 'fxujis', 'fkwffe', 'antaylq',\n 'mmfgstq', 'zxaacy', 'zlswx', 'pbqxil', 'eupck', 'qzcxpbe',\n 'rjalbzr', 'wioagbq', 'kreec', 'zsdcuft', 'rrdzb', 'ocdlvq',\n 'oxiroo', 'zcxsqh', 'wbrsi', 'fqike', 'oskzupi', 'thvof', 'dicbyst',\n 'iojwe', 'hyfizq', 'yoknhww', 'nupiyyn', 'ievah', 'slcgmxg',\n 'cnecpa', 'lcwsoj', 'hnqsc', 'ghipbi', 'exobr', 'nwpnq', 'dmhbj',\n 'amdbmwl', 'xfbzovs', 'puizvu', 'yvsus', 'ykysqg', 'bgqdv', 'zgqbr',\n 'zkjpkej', 'crkot', 'zciymk', 'tleogn', 'sayrmz', 'elwma', 'zugjva',\n 'uifwsmw', 'wstrg', 'xbotd', 'hinsg', 'qpgyoyp', 'xzfocdy',\n 'mbvuepb', 'dtphufk', 'cyapnt', 'yyehhad', 'ohdrd', 'mlibm',\n 'qzdfil', 'rdwszqx', 'bzcbmyn', 'uarjlg', 'mtwpqmx', 'nmagl',\n 'cepniel', 'tylvaa', 'melhd', 'jygeneg', 'fdglfy', 'xcpciu',\n 'ayrel', 'bxceshv', 'kspyg', 'iclkaz', 'ykbzt', 'nrnkzo', 'kxkto',\n 'fabzszn', 'edalls', 'nilmh', 'wwawgnn', 'gymbtx', 'mzipa', 'ajevx',\n 'qppisv', 'otqhsf', 'ippxak', 'bixnqd', 'uqitwo', 'soxcug',\n 'loiscd', 'wqrjk', 'rqntoa', 'fzpxlp', 'tuaob', 'pyqqms', 'krbzmmj',\n 'aijqpfg', 'nstqrbu', 'wmtiahz', 'joplby', 'jyszxq', 'jnxtyhe',\n 'lbvfv']) == 14011\n", "step-5": "# -*- coding: utf-8 -*-\n\n\"\"\"\n@Author: xiezizhe\n@Date: 5/7/2020 下午8:52\n\"\"\"\n\nfrom typing import List\n\n\nclass KMP:\n def partial(self, pattern):\n \"\"\" Calculate partial match table: String -> [Int]\"\"\"\n ret = [0]\n\n for i in range(1, len(pattern)):\n j = ret[i - 1]\n while j > 0 and pattern[j] != pattern[i]:\n j = ret[j - 1]\n ret.append(j + 1 if pattern[j] == pattern[i] else j)\n return ret\n\n def search(self, T, P):\n \"\"\"\n KMP search main algorithm: String -> String -> [Int]\n Return all the matching position of pattern string P in T\n \"\"\"\n partial, j = self.partial(P), 0\n\n for i in range(len(T)):\n while j > 0 and T[i] != P[j]:\n j = partial[j - 1]\n if T[i] == P[j]: j += 1\n if j == len(P):\n return i - (j - 1)\n\n return -1\n\n\nclass Trie:\n\n def __init__(self):\n self.dicts = dict()\n\n def add(self, word):\n node = self.dicts\n\n for w in word:\n if w not in node:\n node[w] = dict()\n node = node[w]\n\n def search(self, word):\n node = self.dicts\n for w in word:\n if w not in node:\n return False\n node = node[w]\n return True\n\n\nclass Solution:\n # def minimumLengthEncoding(self, words: List[str]) -> int:\n 
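    # The commented-out draft below concatenates the words (longest first) into
    # a '#'-separated superstring and uses KMP substring search to skip any word
    # that already occurs in it; the active minimumLengthEncoding further down
    # instead inserts each reversed word into the Trie and adds len(word) + 1
    # only when the reversed word is not already a prefix of a stored path,
    # i.e. when the word is not a suffix of an already-counted word.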
# kmp = KMP()\n # ret = 0\n # texts = ''\n # words.sort(key=lambda w: len(w), reverse=True)\n # for word in words:\n # idx = kmp.search(texts, word)\n # if idx == -1:\n # ret += len(word)\n # if len(texts) == 0:\n # texts = word + \"#\"\n # else:\n # texts = texts + word + '#'\n # ret += 1\n #\n # # print(texts)\n # for word in words:\n # if word not in texts:\n # print(word)\n # return len(texts)\n\n def minimumLengthEncoding(self, words: List[str]) -> int:\n trie = Trie()\n ret = 0\n words.sort(key=lambda w: len(w), reverse=True)\n for word in words:\n if trie.search(word[::-1]):\n continue\n trie.add(word[::-1])\n ret += len(word) + 1\n\n return ret\n\n\nif __name__ == \"__main__\":\n s = Solution()\n assert s.minimumLengthEncoding([\"time\", \"me\", \"bell\"]) == 10\n assert s.minimumLengthEncoding(\n [\"ojtnj\", \"uuydcho\", \"dgsyp\", \"dwxycpx\", \"dpmvc\", \"dvfhmb\", \"flrxjjx\", \"fwhdhvn\", \"rgsakp\", \"aiconf\", \"nzacpk\",\n \"sbxnaj\", \"shway\", \"rgrmz\", \"rysudo\", \"bzkioce\", \"mqxkzvu\", \"wyebk\", \"tymoaz\", \"mlmbg\", \"djbmek\", \"qfnme\",\n \"khkiyae\", \"tjdaxry\", \"sqtcwz\", \"ehnsai\", \"jhncvrm\", \"cxkzgrx\", \"pummt\", \"hzrpfcn\", \"lkyqit\", \"phpqdxw\",\n \"vangm\", \"wcjdgw\", \"pxesvtn\", \"mnqory\", \"bdrzvh\", \"brtzmo\", \"chqgf\", \"bipyxm\", \"meoikg\", \"ysyckk\", \"ojayeiq\",\n \"zrfbsb\", \"yhuotea\", \"crfbhq\", \"tllycn\", \"qxnzihf\", \"avyawpz\", \"bwsjym\", \"myjozc\", \"lbdksm\", \"mctlt\",\n \"dszowuw\", \"syshm\", \"xrvhhkn\", \"kgrcwfv\", \"dwlajlf\", \"yviuk\", \"xegjj\", \"spiczl\", \"vfvomi\", \"mgcujy\", \"dqmzb\",\n \"isrisgt\", \"vdrtuah\", \"vsyth\", \"eoclef\", \"poccek\", \"cgafrlu\", \"crbhpgk\", \"sromv\", \"xmvbca\", \"gobra\", \"ygvlq\",\n \"pjvhe\", \"tfweiso\", \"cskuohg\", \"eyalone\", \"pobkak\", \"nzpxn\", \"lbcrws\", \"uhtfe\", \"eorth\", \"showvu\", \"hxsmb\",\n \"jrggose\", \"izifkb\", \"oqwyf\", \"mozmzj\", \"ijwle\", \"ggtqqqv\", \"geevzj\", \"meota\", \"ifsse\", \"kdtofm\", \"swydhvf\",\n \"tzjhqap\", \"wqwwd\", \"jlinnov\", \"lmxkgeg\", \"stbot\", \"xrsfn\", \"etoyctk\", \"rygagm\", \"vcnrf\", \"zkdge\", \"emqtscp\",\n \"newqcyy\", \"nnuus\", \"exwsxbd\", \"zstvl\", \"lbkko\", \"kygkyqq\", \"oggji\", \"xytbjo\", \"mfbahk\", \"ggoks\", \"lmqewkl\",\n \"qexhyqe\", \"ogaogio\", \"nzvbav\", \"mdole\", \"qvyks\", \"gkupfu\", \"dgmpn\", \"ngrdrj\", \"iitqvk\", \"ipuiqb\", \"ugxfea\",\n \"ialkmv\", \"hmgnx\", \"aoyoj\", \"fvzhjil\", \"butrbp\", \"dwhxnes\", \"etkdwg\", \"cjkghz\", \"tovkq\", \"mmxhv\", \"jgcsn\",\n \"hmictal\", \"zxmnek\", \"pcoeg\", \"ntyqmlq\", \"hfubhtg\", \"ydjbv\", \"xnwlqto\", \"hatgi\", \"bsaczd\", \"pokwk\", \"arxlula\",\n \"zjtqlk\", \"ocfxup\", \"nsnqjc\", \"xdcsopi\", \"iqxyxp\", \"xfmtpvm\", \"bqtgcf\", \"wboycn\", \"aoeda\", \"uowqdgj\", \"rzzzx\",\n \"liucs\", \"ejzxz\", \"qmlehsh\", \"igrbmon\", \"dpmkbon\", \"pmayh\", \"nujdwdw\", \"awdgo\", \"ijgkzk\", \"inhee\", \"jzdtv\",\n \"adhauh\", \"grtmbp\", \"qndbvw\", \"zprrw\", \"mpqieq\", \"jzmzeuu\", \"fcvftqs\", \"qxzxqy\", \"lidguzz\", \"eazwd\", \"zjhfsz\",\n \"zsnzefh\", \"mnckfg\", \"zjgtq\", \"ckyxlif\", \"fznfo\", \"jegnof\", \"lzwyzb\", \"ozivfio\", \"igkclsa\", \"bebzn\", \"bitsggm\",\n \"lrnwin\", \"hjnnzr\", \"idvoirn\", \"dgile\", \"vfngh\", \"xbmur\", \"rqaftt\", \"wjwwwxs\", \"btreou\", \"gjsycg\", \"pvsiylz\",\n \"ccxzgdf\", \"excrrrr\", \"fiesr\", \"jdioj\", \"uzwsc\", \"odrlcoy\", \"hcsit\", \"ptwfprh\", \"sbqry\", \"kffvy\", \"ejeawbp\",\n \"omvcc\", \"iqgxqlt\", \"edsuu\", \"xnbue\", \"qfbcx\", \"fzlmbkl\", 
\"wrrcueb\", \"mmqispp\", \"nknilwd\", \"dewuhju\",\n \"hmdqlxy\", \"vjxgg\", \"lkuexo\", \"dzvfscm\", \"voulbs\", \"uevoqgq\", \"kmhwu\", \"oglzllg\", \"torhihn\", \"fhuqzc\",\n \"mmcfhb\", \"woyayma\", \"uznsvre\", \"mmxed\", \"aoskwg\", \"xrosbm\", \"hpyrgh\", \"tghwbwh\", \"hcwzn\", \"iepeftj\", \"judij\",\n \"kudbk\", \"jonpv\", \"lywck\", \"rxelz\", \"bgifz\", \"mehbxq\", \"fmqnz\", \"sqrmzj\", \"iqqjzex\", \"qioliz\", \"kjizbf\",\n \"lgdcffc\", \"pfgmcr\", \"trdabul\", \"vlqjdnc\", \"jjvbxe\", \"fqlayw\", \"ilbhtyq\", \"saawulw\", \"gxysrb\", \"kighql\",\n \"eceapr\", \"kztbcww\", \"jedkoy\", \"dxpcaga\", \"ndacphe\", \"rcoit\", \"ywgcnxg\", \"klipfup\", \"bddws\", \"jwyof\", \"lrfwgo\",\n \"bediwuf\", \"ujakh\", \"ppima\", \"xzhwvm\", \"guzmsqt\", \"ffbliq\", \"adjmynm\", \"akabzn\", \"inmykju\", \"vlcjyv\",\n \"orquepg\", \"tufrk\", \"vqpjymm\", \"lvuab\", \"qzxav\", \"ekcmu\", \"uqtuhie\", \"kfvtgf\", \"nklwjo\", \"ujxlfpl\", \"zobfpq\",\n \"eignijd\", \"ythctg\", \"artllm\", \"wodhh\", \"tzpwszq\", \"njdqegg\", \"hzrqib\", \"zvoxtfd\", \"htboem\", \"axjuix\", \"bvmvm\",\n \"jbnum\", \"bxdth\", \"atejt\", \"gqsqtnk\", \"fykrjbp\", \"ldyhonr\", \"wcuoj\", \"upphc\", \"agydg\", \"cjmwk\", \"rhxbqh\",\n \"tpgozdd\", \"qyqoy\", \"zjqutw\", \"qoohqny\", \"nsiacwz\", \"xupin\", \"criuvs\", \"eswjeft\", \"pdmevn\", \"zvogq\", \"lrrvo\",\n \"qhfqqpw\", \"ktudfg\", \"ijvmi\", \"neyjjdx\", \"rllpi\", \"vllvaa\", \"esebtu\", \"jyhcrh\", \"otgmr\", \"oudvyxj\", \"pmszy\",\n \"opeed\", \"gicni\", \"mnuzn\", \"mjbfpod\", \"sqwgxu\", \"dwniwz\", \"wmbmmv\", \"lyafuy\", \"zmvlz\", \"kopxzuh\", \"urcbbiy\",\n \"guhco\", \"nerjm\", \"lpdxc\", \"hxmjzz\", \"hynagc\", \"iyxeczi\", \"bdfxmoz\", \"yybnpqd\", \"jvgnb\", \"oquqem\", \"fmclmz\",\n \"dmkhf\", \"zxbjpp\", \"qpxgcir\", \"iecvjm\", \"gtkne\", \"lgtqrbc\", \"gilbn\", \"mcxsg\", \"ncwbhn\", \"wkriiq\", \"zhsir\",\n \"ptkkmw\", \"jcbpkrm\", \"vbefo\", \"vmbcd\", \"vqffj\", \"fhqzjt\", \"nryuh\", \"vmclav\", \"cjyggm\", \"sanev\", \"rrdocz\",\n \"zqdexbs\", \"jrxstt\", \"pyhcesj\", \"aagghyr\", \"cyemjrb\", \"aliohf\", \"qaslg\", \"pnyjzxz\", \"pehnvi\", \"suhuw\",\n \"twopabr\", \"sapqoc\", \"mckrh\", \"nzlgrxt\", \"aqpobnu\", \"pirbjgb\", \"plzlj\", \"raylxpu\", \"gyasfrh\", \"urjfxux\",\n \"xjbwau\", \"iupknn\", \"vhxnc\", \"dnbjop\", \"vrxhwmd\", \"vjsmkh\", \"rfmqids\", \"smaiwt\", \"vkyfo\", \"bjqyxc\", \"rbbbp\",\n \"dlkzg\", \"dwvdwu\", \"prulzh\", \"bavge\", \"ehhrz\", \"xxjqk\", \"pxopmp\", \"okmkmb\", \"slcznpp\", \"nvqlb\", \"jalrk\",\n \"parwlcd\", \"anbxo\", \"oqcxyzo\", \"fjhrdjh\", \"pgvnwfe\", \"yfjyvh\", \"quvszjm\", \"xyiig\", \"xtncqv\", \"svsix\", \"jvpdnh\",\n \"owuiv\", \"bsrugtt\", \"rmvggws\", \"lmdql\", \"kvmvd\", \"xrpmaw\", \"ssnxyb\", \"oworq\", \"rmmpuya\", \"rijpih\", \"aelazka\",\n \"kncksqx\", \"yvtdiy\", \"epato\", \"pbbamj\", \"fejsw\", \"zgsru\", \"ekwrre\", \"zqben\", \"vugxi\", \"fvcsdp\", \"rujcews\",\n \"asqxya\", \"worjlsd\", \"xggakg\", \"kzfpot\", \"haqon\", \"ypqxzz\", \"mmkzwt\", \"bdhif\", \"exzhv\", \"srnklzh\", \"hlrunb\",\n \"dwfyke\", \"fvgbtdm\", \"aeutp\", \"czhefx\", \"tegfw\", \"jkxpsb\", \"gxkfkw\", \"exvntd\", \"gvuti\", \"jdmly\", \"owaqhw\",\n \"fopuxzv\", \"edrvil\", \"biszwgv\", \"vgckzd\", \"fqdxn\", \"qktdf\", \"hpgwrk\", \"gpxiips\", \"vxnlab\", \"yylxz\", \"hsuscch\",\n \"bhivaf\", \"wzrwtc\", \"ebplv\", \"yzxykou\", \"mxlssom\", \"evghv\", \"hksleg\", \"shybau\", \"zeyqa\", \"tljqka\", \"axfkec\",\n \"fatdj\", \"janlkcc\", \"sjorbra\", \"jplge\", \"oazzot\", \"qbgtncn\", \"ozlil\", 
\"stohadq\", \"rvpuwn\", \"oqwpl\", \"byftgi\",\n \"ubuusl\", \"fkogr\", \"bybdyhj\", \"vinyuzs\", \"ivsqvz\", \"vmnae\", \"gckxw\", \"rozbe\", \"glvxwj\", \"rcgicu\", \"xmvbd\",\n \"itycsry\", \"llmwrs\", \"fuqth\", \"styrrwl\", \"wsseuln\", \"xwflcli\", \"muxgz\", \"ypmbboh\", \"rpmvnep\", \"wjvvnv\",\n \"arjnw\", \"toauwc\", \"ltjxqrl\", \"basffd\", \"clxozwd\", \"glmrv\", \"iejgfj\", \"cvkoj\", \"wotjf\", \"mqucec\", \"xalgemc\",\n \"hgimkh\", \"golvfq\", \"fuqpmak\", \"mhpcp\", \"pxoibt\", \"ledqa\", \"guzbyr\", \"ztvbeka\", \"racdp\", \"krsngra\", \"aaiknz\",\n \"bhoobyc\", \"xibbe\", \"yohepxk\", \"eclevs\", \"ldliwcm\", \"qatvlk\", \"eiypbw\", \"vxvtwa\", \"nkdwsej\", \"ftmyvp\",\n \"gpthye\", \"gazwoi\", \"zzgipon\", \"cithg\", \"wpabujl\", \"jhezlnb\", \"vqqaxfg\", \"kvpbk\", \"vggjemp\", \"owylv\",\n \"lgwtfpg\", \"jjqvfm\", \"xbhga\", \"tulvfv\", \"sefuo\", \"hbysv\", \"ozopepd\", \"awyrifd\", \"pnudwx\", \"vreje\", \"zhpgw\",\n \"qygbf\", \"tvbrvy\", \"zzmcw\", \"cznee\", \"deuzxt\", \"qfppjvi\", \"ilkps\", \"ydwhg\", \"krwkxzu\", \"mnsidg\", \"rkxyyr\",\n \"ajkqz\", \"xtmom\", \"vqocor\", \"fympcl\", \"yyleyzy\", \"jjvzhrn\", \"kpmxvuz\", \"txoeqlx\", \"lhhmn\", \"chzgpf\", \"ncnjxle\",\n \"ihxrg\", \"feqixq\", \"lkfhcar\", \"hfnsh\", \"bifczy\", \"umknat\", \"yrhgkh\", \"mgpcu\", \"qotukst\", \"yqlmfq\", \"ttcdp\",\n \"xnjjzm\", \"cukbr\", \"hjhjb\", \"iikfcsr\", \"nsqbnnz\", \"dauygf\", \"cmydq\", \"lfnhqnl\", \"ppqgs\", \"hscbfug\", \"ohzisud\",\n \"opspdkv\", \"aauxbop\", \"wpkhzo\", \"sxbsgu\", \"tajrv\", \"ololy\", \"mxmus\", \"vizvxv\", \"osaqz\", \"rxygkn\", \"mrzqlf\",\n \"zrriyxb\", \"ufroe\", \"bajozg\", \"atpsu\", \"uhgauzu\", \"tffdw\", \"mdjulde\", \"rbrmy\", \"jhkqvwl\", \"gzsultq\", \"nkbfi\",\n \"xtvwh\", \"dryzcv\", \"emaxuk\", \"zucvutb\", \"jdduyk\", \"bjdin\", \"loicuq\", \"qhjjb\", \"rgfjbq\", \"mphnk\", \"lxvceyx\",\n \"zeoxb\", \"fxhnxu\", \"qpbipe\", \"ophwp\", \"wiioer\", \"quchwj\", \"pouxunw\", \"bloxgg\", \"xbsma\", \"dtwew\", \"xstorn\",\n \"qfrfkz\", \"gxusbsn\", \"dhnxd\", \"mhstbs\", \"hekbtu\", \"wvrrjw\", \"yeiwd\", \"patplsx\", \"qmyiyi\", \"mowboj\", \"iskyd\",\n \"bqhjj\", \"povppk\", \"vthpwx\", \"uuydaw\", \"rduxvez\", \"vmcww\", \"ylruvph\", \"ymqosp\", \"wzcvohg\", \"lhepwta\", \"bckhc\",\n \"oiyyt\", \"wqzfv\", \"uduec\", \"lkkbtzl\", \"prvpbo\", \"jrwstii\", \"ijztoo\", \"qwwth\", \"vqzqiun\", \"krnjp\", \"zyanpiw\",\n \"ojhjhvg\", \"lohmb\", \"thqtf\", \"reptzv\", \"zgkyq\", \"lhkvy\", \"cmjwl\", \"fmilgpw\", \"jrfawz\", \"vrtzd\", \"ezgfl\",\n \"plzng\", \"zidzso\", \"civavlg\", \"vtwopu\", \"ljhckxo\", \"nuydt\", \"qembl\", \"fiwrre\", \"gfrgi\", \"gzegiq\", \"mltlqo\",\n \"pcett\", \"snbsc\", \"msibcqn\", \"beacrhz\", \"vsycjt\", \"gjqji\", \"smcegol\", \"zregkp\", \"smcazoj\", \"dziqad\", \"jpuwp\",\n \"hnlztac\", \"vduitco\", \"wyencad\", \"bkdnnqo\", \"cabzyg\", \"mgpcwr\", \"fxgvkxt\", \"wlkcrdd\", \"bhmhsy\", \"gqcctjc\",\n \"atafpt\", \"vdzhmcg\", \"ighxj\", \"gfqpale\", \"fohbrtj\", \"mfpsgt\", \"tarjocf\", \"gyycb\", \"qvqfryl\", \"jpwowwc\",\n \"jcgcg\", \"gmrjze\", \"nfptxq\", \"hmjhxge\", \"ieelj\", \"suvkgr\", \"nwjxe\", \"tkepqm\", \"extnpmq\", \"rxzdvf\", \"relzaa\",\n \"hfhgaq\", \"lmihlz\", \"pacocq\", \"dclxr\", \"oknoem\", \"pbpnnd\", \"nleerfl\", \"tvytymc\", \"aamfnl\", \"ufdnq\", \"bxyzvyh\",\n \"vksvout\", \"lohxhf\", \"sskgn\", \"aawbv\", \"hrvhx\", \"wvoqf\", \"vxkvh\", \"oqany\", \"bcmyd\", \"epdddqn\", \"zrlej\",\n \"bchaf\", \"hmftii\", \"mefcrz\", \"wbxvc\", \"ewwnldf\", \"cqecxgh\", \"cnwvdmk\", 
\"vetrw\", \"zmogwov\", \"lshlzpe\", \"lijay\",\n \"tcdqg\", \"xavqixd\", \"yjkhtsl\", \"myjvow\", \"cgthhd\", \"taaii\", \"iuuegk\", \"lcypmle\", \"wesrit\", \"tybco\", \"nhxysw\",\n \"awkrj\", \"jcmqa\", \"porvo\", \"nrypriu\", \"vznnevp\", \"hzklwi\", \"vapuxh\", \"wyfkn\", \"albemu\", \"ttfdbl\", \"dbqrjv\",\n \"cxals\", \"qzitwf\", \"ysunur\", \"llsefy\", \"cghfzji\", \"jboaa\", \"emhlkw\", \"khhmgha\", \"twlxgjz\", \"pyujor\", \"ozcax\",\n \"fetvovo\", \"mdhrrd\", \"qdhdne\", \"fiuvw\", \"ebyxh\", \"ldaothh\", \"vwyjf\", \"yjyljlu\", \"ivroqg\", \"qvpeyec\", \"eemsdra\",\n \"wavgeqk\", \"bjejrqg\", \"mdjimoz\", \"fgopy\", \"lgwodr\", \"cunvszh\", \"wiver\", \"ghmog\", \"jzgfyk\", \"vxlbx\", \"kvgbtn\",\n \"cunorte\", \"mtesdc\", \"zdzmqu\", \"pigik\", \"smruadg\", \"czjxlt\", \"kukgaok\", \"tsldpqq\", \"luomo\", \"ezbcvdc\",\n \"tfetwes\", \"uopzf\", \"wsvezkw\", \"wrnlvbx\", \"bpqungd\", \"jqnnof\", \"rqhiomi\", \"voulqb\", \"ouspxn\", \"chngpz\",\n \"fbogfcv\", \"nqhunxo\", \"rydbke\", \"ewduo\", \"suqqwup\", \"oxzfxj\", \"kuwfwm\", \"euiics\", \"mvftoau\", \"vstfbm\",\n \"vnmtoo\", \"muicf\", \"bjbskxb\", \"knbomlf\", \"enrbtfk\", \"hnaqe\", \"vxzsr\", \"gkqma\", \"qygmn\", \"ztkybmb\", \"injggpk\",\n \"enqrgdk\", \"rkgoct\", \"tgaiu\", \"dnknoxk\", \"iwuou\", \"oxanccl\", \"xestej\", \"ekrqq\", \"xbwhz\", \"jkdvxfh\", \"oybaay\",\n \"afyhci\", \"papffjq\", \"bdppssw\", \"qwyvjx\", \"xmnnosl\", \"kvqzjl\", \"wcwii\", \"ygfvt\", \"tpabbht\", \"kjmaq\", \"duschjz\",\n \"gguiof\", \"wgfhve\", \"joqmfjq\", \"smqfd\", \"ynlovlz\", \"sgrzum\", \"bobmux\", \"dcppi\", \"isdjrwl\", \"lbevb\", \"efqsirq\",\n \"hlgfql\", \"enmemlb\", \"dbmfk\", \"ibfpzm\", \"rtdnooq\", \"yicdq\", \"xadul\", \"dxibxzi\", \"yyxnj\", \"jhsdzxw\", \"thltbi\",\n \"kwhreyi\", \"hrocoa\", \"fnaalbd\", \"vnwona\", \"nnonm\", \"naqaf\", \"xgzzies\", \"uhruynk\", \"kgadfx\", \"hyohzbd\", \"hnajx\",\n \"yipzh\", \"ezdxaet\", \"xbzppoz\", \"rwnewxz\", \"hlcbkmb\", \"znyhu\", \"zsqtpkr\", \"gmyxr\", \"rphyvo\", \"bgjuz\", \"nulpv\",\n \"eejfoso\", \"xmwcnes\", \"xxxxnpe\", \"jezkk\", \"idfsxrw\", \"qgzjtf\", \"arpzpo\", \"hxsanlt\", \"emvotcb\", \"sknzhvg\",\n \"icitca\", \"ivhdln\", \"sqilerz\", \"ndigw\", \"bcsre\", \"mibbep\", \"zsczom\", \"cgghjbb\", \"fkylfgt\", \"bvzofs\", \"mefsng\",\n \"bispbza\", \"tsosgy\", \"xopalrw\", \"wserf\", \"jbmlz\", \"xidxny\", \"ffmpjos\", \"vddwxmd\", \"netnsg\", \"kgevsp\", \"pguuv\",\n \"cwisp\", \"slxiyb\", \"dmwaguc\", \"jobwusu\", \"uytcqrv\", \"hzhsy\", \"zrlsdd\", \"xhxah\", \"rxzij\", \"zwdgy\", \"ygmvkz\",\n \"drkzbo\", \"qpsal\", \"tpxvl\", \"lfmfl\", \"sayjvlh\", \"rdamym\", \"ycuzd\", \"zkycu\", \"hdesec\", \"unequk\", \"lpkdid\",\n \"vorxls\", \"admsdop\", \"rqnvkyg\", \"krnqqtb\", \"rxfms\", \"xfthd\", \"pxjbk\", \"gpslrg\", \"rwziwef\", \"usxgqvz\", \"baxxye\",\n \"ocrkkrw\", \"lrlgsp\", \"ceyctg\", \"rniml\", \"vavug\", \"jgircl\", \"jrpnmsa\", \"rywvlfg\", \"prxnys\", \"fkzmknn\", \"ooelc\",\n \"btvfs\", \"yqepuvw\", \"tmmmb\", \"qmpzexb\", \"zjckjvd\", \"aieytbb\", \"oafqq\", \"szrcyh\", \"czrxgae\", \"ifkte\", \"hfgajox\",\n \"pwpnkqq\", \"yqphogn\", \"xuwthrd\", \"mpcmy\", \"qitdoa\", \"avlzfrh\", \"ywpip\", \"dgeki\", \"fgbnx\", \"tyofu\", \"xziqzj\",\n \"qxzvqz\", \"vtsqk\", \"ipkld\", \"yfhim\", \"ebaegdc\", \"ubhrh\", \"ldejv\", \"mtflwy\", \"ocpyj\", \"yopgqs\", \"fkjxxd\",\n \"njnnwr\", \"nylkeb\", \"taymdqv\", \"ekpznq\", \"cbzobmg\", \"bucdds\", \"qjozu\", \"uvpghor\", \"obhnu\", \"ljkxbg\", \"uqrxjtf\",\n \"xwbxiw\", \"oxsmcg\", \"spchdd\", \"pcuitj\", 
\"faidq\", \"tybmy\", \"uygiyp\", \"qloizj\", \"cafgmy\", \"smetd\", \"kwcwb\",\n \"tdabxf\", \"fpmrc\", \"lfjujn\", \"vvmvex\", \"mnsgdc\", \"enjlgsw\", \"ohwcg\", \"kxjdaup\", \"rotjarp\", \"aovdoq\", \"oviwq\",\n \"qwaxs\", \"bmazco\", \"plcljsv\", \"yytjhl\", \"vgwjm\", \"drnue\", \"vqjgf\", \"uqlsfy\", \"bmqmfp\", \"lkauwna\", \"ozmqce\",\n \"heunaxr\", \"zaffbj\", \"arbek\", \"qjnllw\", \"fdkhlz\", \"wgmbwh\", \"yceqag\", \"ltjjq\", \"yurggfw\", \"puaafsl\", \"tjiqkyt\",\n \"yuzub\", \"ytmrfq\", \"ommmu\", \"ipknn\", \"iubnuab\", \"dzthvc\", \"zjbzpew\", \"dcooev\", \"pjydqcf\", \"zuojlzy\", \"zwjyfc\",\n \"spmac\", \"dfkbnz\", \"fzriie\", \"asusog\", \"hdodx\", \"drjpo\", \"ddyif\", \"chabv\", \"ebvkwrr\", \"burdjl\", \"jjddi\",\n \"dljzkye\", \"samyg\", \"zwgxcq\", \"xtratwo\", \"qfopz\", \"xvlaw\", \"laage\", \"btdium\", \"vzlnzt\", \"kmvbzkq\", \"kctobsx\",\n \"kazbelu\", \"yxdwrk\", \"eslvjc\", \"nhsdmvs\", \"zuxqcc\", \"hqtxovn\", \"zrbdai\", \"fgjxs\", \"txecvio\", \"kjxlq\", \"dkuxss\",\n \"mkbevn\", \"pzmdqc\", \"ihyia\", \"atsub\", \"twytus\", \"nzooxj\", \"qwuoly\", \"fdoigo\", \"zukhlh\", \"mugeaxt\", \"qqsfyls\",\n \"qqtql\", \"wrvphcx\", \"nzjfhx\", \"uequtk\", \"fxuto\", \"qnast\", \"nveys\", \"ltbrcth\", \"toctdib\", \"fbpnh\", \"umxfgn\",\n \"zvjuta\", \"yeron\", \"qzvswqk\", \"gbctr\", \"ryryz\", \"zieknd\", \"zcsna\", \"jrhak\", \"zfxqsj\", \"urlba\", \"lbozqf\",\n \"yfcjaa\", \"hazgy\", \"gmmfzyz\", \"zjvkyc\", \"rvfdcf\", \"daitab\", \"hcxqgum\", \"qwakp\", \"ltbsjwo\", \"pqqtygx\",\n \"upxcxao\", \"qylot\", \"lmxqc\", \"dwzcd\", \"tjccm\", \"mqcpap\", \"wgxqtr\", \"ivycvxy\", \"wdykg\", \"snvqka\", \"jxtvtsb\",\n \"jnyowsq\", \"iwfuoig\", \"cuoixhu\", \"fzwalg\", \"djhrar\", \"sjmahk\", \"dyusf\", \"wrxqvdi\", \"ftytlor\", \"jsjbv\",\n \"vjbebg\", \"agvsn\", \"vvmpgm\", \"gsgjopk\", \"vbqvhy\", \"afopf\", \"zybfuz\", \"aqsgc\", \"ytrjsvn\", \"wlhdfr\", \"vdhvl\",\n \"jrlvr\", \"cscxwf\", \"yhgbew\", \"wupbl\", \"ssuhyvv\", \"bhcirzk\", \"oykwk\", \"ijbto\", \"qsnpgw\", \"otwzage\", \"ytqzh\",\n \"rgwow\", \"bvhgkwh\", \"fvawxie\", \"fllxw\", \"gfcqf\", \"scoqb\", \"qubrq\", \"gdxjtp\", \"ahrpck\", \"awnlgi\", \"cmehsyp\",\n \"dwmytpy\", \"firyeq\", \"oohwhr\", \"caelk\", \"mqemvs\", \"qflkzi\", \"tfpibll\", \"ybhzd\", \"ctsxri\", \"yurocj\", \"dnlnl\",\n \"ydmdva\", \"xkaotl\", \"xovax\", \"ypynrqp\", \"kwfzw\", \"fbgsmrc\", \"tutime\", \"rcugul\", \"cvewno\", \"typhbpa\", \"wazew\",\n \"flzfs\", \"wxxbza\", \"ogjfkl\", \"vjlebet\", \"imbubm\", \"xinyncy\", \"dqmxfy\", \"buhagzh\", \"jjadpos\", \"gejyz\", \"gxshqk\",\n \"wkwrs\", \"dqeriqo\", \"dmixr\", \"bysjih\", \"aoloq\", \"ddwhsxs\", \"nteqv\", \"cqagf\", \"ditsrn\", \"wfxgl\", \"jwjqb\",\n \"rvkxj\", \"rxapr\", \"yrlkip\", \"npquasb\", \"nvezlr\", \"gmhchcx\", \"lodfihi\", \"dheypxa\", \"plzjykh\", \"qopsthg\",\n \"zsnes\", \"raongg\", \"zrpnac\", \"tzmtltj\", \"jsecdn\", \"rzudh\", \"hkcyic\", \"xsxmw\", \"reeuwpn\", \"grkwrag\", \"gvzzbsq\",\n \"lrfta\", \"aqyvbkj\", \"ytgfu\", \"wcmvd\", \"olnvfi\", \"hhgmhb\", \"kojmepr\", \"wpohl\", \"szhgg\", \"hymiblu\", \"lkwjr\",\n \"zulqpz\", \"sdcqjo\", \"olgsgez\", \"lxkpqci\", \"yxcgn\", \"gmvex\", \"fskpppe\", \"utzto\", \"axncvp\", \"lcyahba\", \"ydeae\",\n \"zvzar\", \"ghfkkqv\", \"ryrpg\", \"gucpbq\", \"reofjz\", \"cdnoo\", \"dchhh\", \"byiwd\", \"cqbhok\", \"ksfnoa\", \"xsmmlr\",\n \"qyvdfqh\", \"dzshj\", \"bpifnzh\", \"uxmoml\", \"jdxvojf\", \"ihfll\", \"vwesfof\", \"zynnpb\", \"fwzra\", \"rxlgww\", \"vkmjd\",\n \"hcjgzt\", \"mkapfl\", \"ffjqlf\", 
\"wulaebc\", \"gurramv\", \"tufkzai\", \"bxprqek\", \"nkohv\", \"abgfwyl\", \"slslg\",\n \"wirsnh\", \"pykvuh\", \"fdrwk\", \"gtmgsxe\", \"dxsaab\", \"lqiryty\", \"aoezg\", \"tzhugcg\", \"uoarf\", \"dwhsv\", \"rjiuoi\",\n \"ycgcdnf\", \"rtfmwz\", \"amkjc\", \"woogtdi\", \"deprx\", \"ucknu\", \"womfm\", \"xdeev\", \"qapxpuu\", \"ngulnk\", \"fgtxyf\",\n \"hnyabid\", \"cilmy\", \"wrsewtf\", \"luvtmo\", \"wftuh\", \"ifoeeqp\", \"dtfdhhl\", \"rwnburg\", \"fohkkul\", \"frqqi\",\n \"gsrcyc\", \"teuync\", \"dvpvak\", \"daqjki\", \"kksscp\", \"somsde\", \"tyfvck\", \"ftfekl\", \"ahncv\", \"yvosm\", \"qgllvg\",\n \"ylfwv\", \"jenqns\", \"lqovrnm\", \"iyger\", \"nfvtsv\", \"bknxmqj\", \"pfzybdr\", \"hqjol\", \"chlpk\", \"etgrtqa\", \"msuxdx\",\n \"vnoatf\", \"ypdzomn\", \"vsshmg\", \"rfkipq\", \"jvpbiz\", \"vbskd\", \"edsoixj\", \"uowim\", \"hqtsj\", \"inbsxal\", \"ookrv\",\n \"ipotdnk\", \"kmazqd\", \"jpfghb\", \"gvmnnpv\", \"juvwa\", \"xtkvzw\", \"ejqcl\", \"ebgcnt\", \"ztuyu\", \"dlzthw\", \"zzipe\",\n \"iaxwdxy\", \"htynwkc\", \"lefbq\", \"pizfr\", \"vttrsv\", \"oagak\", \"eqlrom\", \"vttefg\", \"dsrmk\", \"oekbe\", \"cvugzk\",\n \"diwvz\", \"gxmfob\", \"vjowzm\", \"mjpop\", \"uznhz\", \"kqvjwug\", \"wjqvxfg\", \"jbpwezu\", \"wsckdx\", \"slqfomn\", \"omuxk\",\n \"zlgblso\", \"kvitoq\", \"dmafq\", \"djxmzk\", \"pjqfegq\", \"yjrttas\", \"siakcx\", \"iutiqk\", \"nwfdj\", \"gbgtazk\", \"cpqtf\",\n \"panmlr\", \"aqubhsg\", \"iwdim\", \"nqetym\", \"mwazh\", \"thyhy\", \"ydtxan\", \"xfoin\", \"lsosc\", \"esznfa\", \"xgdisi\",\n \"flvbzh\", \"mpltx\", \"iwjpsqp\", \"udfycf\", \"rntmc\", \"ltflwu\", \"wkgbaw\", \"bcuzt\", \"hejxuhb\", \"lguohe\", \"klnhb\",\n \"mjump\", \"avcwrol\", \"yrcqlc\", \"ihxul\", \"avajh\", \"gtpauet\", \"iemzk\", \"rfdub\", \"gqnbk\", \"cfcmg\", \"iobyh\",\n \"iruuapf\", \"tyifwt\", \"sbdtp\", \"mngcpmb\", \"oaqpolm\", \"mmimmh\", \"gxknadi\", \"bmxhuu\", \"ulyoa\", \"keidy\", \"vsnfk\",\n \"cnnnfty\", \"pkajm\", \"ddgeecb\", \"prxidqd\", \"wmenvhd\", \"akjcqo\", \"tnekfef\", \"ipvsi\", \"pzjwq\", \"wmmct\", \"erdjnuf\",\n \"vgeaqs\", \"nlbdx\", \"dpvbe\", \"dgeqz\", \"aiguzh\", \"akawppx\", \"tykrjcs\", \"gvavo\", \"hkyle\", \"yhedx\", \"xzqcg\",\n \"gzdxt\", \"csssbk\", \"tmekrmv\", \"lfsgo\", \"iizahz\", \"aszfd\", \"aybqnsl\", \"vadwxsl\", \"ulmiii\", \"xaxdugp\", \"sfnnsbg\",\n \"dkyruh\", \"qhpqu\", \"amesjd\", \"evjuki\", \"vtqjw\", \"aoabp\", \"qnsuhe\", \"bplbx\", \"fdqok\", \"ozkhgib\", \"cggwzys\",\n \"nbknjay\", \"ooambw\", \"evmvegf\", \"htdlxik\", \"kahcume\", \"bojpn\", \"bhipie\", \"hdyjslw\", \"pbkkq\", \"qwszl\",\n \"fgkbzsd\", \"hejdx\", \"vmcfhgx\", \"puzlmmm\", \"meffil\", \"boakbiz\", \"eczot\", \"fvkkit\", \"jebfx\", \"umvkjg\", \"uikgs\",\n \"rycgpf\", \"rfmfgmy\", \"nveho\", \"bgywqen\", \"gepfma\", \"vquyq\", \"wcercbw\", \"wbpjkxc\", \"rqloeda\", \"omclokx\",\n \"hvotwp\", \"tvqfxxu\", \"qrtghk\", \"hggme\", \"arnmfnt\", \"cxprj\", \"rspdt\", \"hlgfq\", \"dmqel\", \"pcerxk\", \"ptqjc\",\n \"wzreko\", \"kahks\", \"xjnzo\", \"xzzye\", \"xbdeu\", \"koiwkv\", \"jlwkkjr\", \"xzdixoc\", \"xeedvrm\", \"mrtnhqi\", \"jaeann\",\n \"mvubp\", \"olklqf\", \"retbgcj\", \"qxxlhh\", \"cqyyoy\", \"ngwikg\", \"qijte\", \"sjzck\", \"zkmkx\", \"ongtzf\", \"tanow\",\n \"smgntvq\", \"urfgt\", \"xwcroa\", \"kadcpd\", \"cxhgo\", \"walku\", \"kvvcsyt\", \"elwmuxk\", \"bfphtm\", \"vzeumuq\", \"sknvev\",\n \"vbsnfd\", \"grmbg\", \"vjahwt\", \"dmcbmn\", \"smubz\", \"jobbfcv\", \"ujlkm\", \"lcthh\", \"bauuqdu\", \"kjgzgtq\", \"gicjz\",\n \"nugbax\", \"kbnjfiu\", \"sqfpein\", 
\"obbgfww\", \"ykggxjx\", \"irnmog\", \"xniuv\", \"rqiwycq\", \"hzlgyu\", \"yjtrttv\",\n \"satym\", \"dgqhlkk\", \"rghal\", \"tbekx\", \"kkwmo\", \"eahwhks\", \"bpvmbur\", \"sqtgkj\", \"khboz\", \"enefr\", \"vkzqvt\",\n \"wfruavu\", \"ninomu\", \"ypktaoa\", \"mlpmoit\", \"fxyhjfp\", \"fgnpp\", \"txieja\", \"dprnj\", \"bgyrp\", \"zsqwqrw\", \"stqzki\",\n \"kwiayb\", \"ulbsn\", \"aetje\", \"vwzbb\", \"tedwyqs\", \"cymiruy\", \"jigpoqx\", \"ypuqsc\", \"weletu\", \"gvibea\", \"chhuldm\",\n \"baylv\", \"wdhovo\", \"imfqu\", \"meodnsk\", \"jhlckqw\", \"jolyfh\", \"jsfkrhr\", \"tnbfzvs\", \"egcfht\", \"qnzmyr\", \"owtrqu\",\n \"oqaqu\", \"xftys\", \"goxfftm\", \"sgbnp\", \"bhfvaz\", \"gospa\", \"jwzlvwk\", \"lqncoqd\", \"xxizglc\", \"bwffm\", \"mhpggzr\",\n \"kdaoewx\", \"anviou\", \"mqiij\", \"wkskpn\", \"enougdh\", \"vldnn\", \"gbfgz\", \"ejmbh\", \"qsdrvsx\", \"mrvbz\", \"cqlufpf\",\n \"kbgjlu\", \"njgna\", \"admrmk\", \"pwwsc\", \"gxkot\", \"pdjwh\", \"ejwxt\", \"bpaxufv\", \"iwjzs\", \"xxfsg\", \"vuhgh\",\n \"srytgb\", \"yesvlux\", \"tggnch\", \"cgnbb\", \"fbzbx\", \"aomoqf\", \"zkrvrjg\", \"ueaoz\", \"dppacnl\", \"ewovhxz\", \"kbvee\",\n \"ixeeb\", \"gwgoqm\", \"hlwlxe\", \"fpmkrk\", \"wzjsr\", \"ispwe\", \"garofu\", \"jcmpec\", \"tggeo\", \"yzdeo\", \"axpmln\",\n \"zhnlhck\", \"duyqcn\", \"tpqwqi\", \"jvmaj\", \"bisgoy\", \"mpwmurb\", \"olqla\", \"ecapwan\", \"kcpxn\", \"xcapin\", \"ooctk\",\n \"sgqql\", \"vcyyjxf\", \"ejyom\", \"jsgtha\", \"logxnjg\", \"nypadhj\", \"dprmk\", \"cqkuzb\", \"gratv\", \"tgkjgu\", \"fttcafm\",\n \"tpryi\", \"ubbhw\", \"uwcuyn\", \"zkgohs\", \"snfesz\", \"ifrex\", \"tkbfz\", \"fvvkp\", \"otjiq\", \"lgomjjv\", \"ertracf\",\n \"bregu\", \"kkbizb\", \"hyhvn\", \"zjcnxfl\", \"mceskuj\", \"lmupdq\", \"zdzqzgo\", \"yorppew\", \"fpwtjd\", \"dxvyzt\", \"bbnnu\",\n \"pkycae\", \"ucvapn\", \"dijmkb\", \"nvwwpr\", \"bufkw\", \"zhono\", \"vayxf\", \"hlfwkev\", \"klkvkj\", \"yzgpwg\", \"lcbqr\",\n \"tkkfi\", \"pcgljx\", \"bhduxu\", \"rgfipts\", \"hkjbrr\", \"fobvy\", \"wqmqhxo\", \"yjgvypg\", \"ehgoizl\", \"ipiibzh\",\n \"aqxbxtx\", \"lrtin\", \"fyyuypr\", \"pyrocgm\", \"kwqbg\", \"ukccw\", \"wgsbpvx\", \"pcoivrv\", \"okhxaba\", \"bbuaibf\",\n \"ccvfm\", \"phpst\", \"yxtqiz\", \"cdfbo\", \"sijfljn\", \"gdlhn\", \"bqmbced\", \"tiejf\", \"aurqer\", \"olmyd\", \"prctay\",\n \"lwflhi\", \"bbehvta\", \"oxoda\", \"lklyc\", \"rzedhp\", \"kairil\", \"envan\", \"wdcwfk\", \"xoroddb\", \"womrlr\", \"ruxebe\",\n \"jnpywrd\", \"wrifvz\", \"zkewcd\", \"vllfrn\", \"uvdvjh\", \"bglpya\", \"vzokkbw\", \"apaoqt\", \"xpjizn\", \"xoajmd\", \"xapjwc\",\n \"jcknwg\", \"bjpreep\", \"ffkua\", \"ukcbah\", \"bugvkrf\", \"cbmmfs\", \"cwaczhl\", \"nsqaj\", \"sjeikg\", \"fayqif\", \"slowoh\",\n \"xjpvkpa\", \"ynunjle\", \"bqavt\", \"nkpqudr\", \"neikvd\", \"yuqlzg\", \"pdxbtrb\", \"cashlog\", \"iqiqy\", \"smjmxv\",\n \"zbtpbr\", \"zzamzcv\", \"jmakg\", \"txfswc\", \"pkaym\", \"swlde\", \"utann\", \"mqgpjne\", \"pslfvek\", \"nbiqhb\", \"bzsianu\",\n \"wnxgbi\", \"ahkeeiz\", \"dqdfjg\", \"bptdg\", \"pwita\", \"uqyflq\", \"txabjn\", \"yznjmve\", \"mukcqqf\", \"cxonbf\", \"ixuewjm\",\n \"pzlcat\", \"eikeeo\", \"scwsoa\", \"uaeyw\", \"oeorff\", \"gbqgd\", \"qboqiv\", \"hiulpb\", \"dbbdm\", \"qvdxx\", \"aypxbcn\",\n \"ykjwdbg\", \"pvfxn\", \"shrqyz\", \"zaxtu\", \"pfefgww\", \"jwifrw\", \"zxuud\", \"kpkwhlj\", \"lwptgd\", \"zpdmvsw\", \"takeb\",\n \"ynehl\", \"kixtod\", \"fyrgm\", \"qirzmr\", \"shyvec\", \"xjgzt\", \"bwfvht\", \"wyehh\", \"renzc\", \"nnibax\", \"slhfng\",\n \"yjtecc\", \"lghvbzf\", 
\"qroxvun\", \"mlsed\", \"rrudho\", \"cyffhh\", \"tjlxahp\", \"xmaepzk\", \"jvdzh\", \"bbvegrw\", \"cebcz\",\n \"odjpeam\", \"guerph\", \"tgmphgo\", \"ohtkqq\", \"jcxojz\", \"haeheae\", \"erydxni\", \"hatjxx\", \"kwmgkjw\", \"wmezvy\",\n \"hsuuvfi\", \"ineek\", \"grkxmhb\", \"alxkt\", \"rmspxdg\"]) == 13956\n assert s.minimumLengthEncoding([\"me\", \"time\"]) == 5\n assert s.minimumLengthEncoding(\n [\"yiyqbv\", \"njqvawn\", \"wnlovvp\", \"vogum\", \"jpolc\", \"zleec\", \"sxdrww\", \"rbowr\", \"xsjorra\", \"kwjsx\", \"vornum\",\n \"echku\", \"kuizegn\", \"rhuvv\", \"eemkh\", \"yshht\", \"pbixoa\", \"cmbxvtr\", \"iupia\", \"nmcbq\", \"mgrjsx\", \"ejvniwt\",\n \"svhsel\", \"kazenhf\", \"fevpm\", \"xcwqfgw\", \"ozikzc\", \"mywnmqt\", \"taorwjm\", \"gcshacq\", \"fgtasq\", \"qexygw\",\n \"ljmbari\", \"zfjudos\", \"rgxuzy\", \"kmzryaf\", \"exjfd\", \"mcqnebz\", \"ptoim\", \"zglfi\", \"fhneaz\", \"rexgc\", \"lhplwyr\",\n \"dthdp\", \"jizetec\", \"obyzg\", \"rqupa\", \"yphttge\", \"wdcdn\", \"wdomtr\", \"hchbd\", \"ytyra\", \"upytftl\", \"swbbi\",\n \"qpcybv\", \"dcoxspd\", \"dftkf\", \"nwjfmj\", \"ojbwy\", \"zofuy\", \"adqkt\", \"kpcply\", \"aeukw\", \"fqblb\", \"xurrbpo\",\n \"veioa\", \"puzvl\", \"bnzvlax\", \"tjzsdcw\", \"jarqr\", \"orxjbg\", \"ilrqdri\", \"syjuoyi\", \"htoqdco\", \"gwslw\", \"dpqyf\",\n \"jnkhv\", \"fpqhpr\", \"baewnvc\", \"caunsf\", \"qhbpe\", \"wlckl\", \"lmoroqe\", \"ddlak\", \"qipwbfp\", \"cefqs\", \"surczp\",\n \"jtmfuro\", \"ezhqau\", \"dlsco\", \"hywoqh\", \"lnifq\", \"hvfmu\", \"cqjdkok\", \"tggdact\", \"rwuowdk\", \"attnl\", \"lwhyq\",\n \"mqtsc\", \"bmwajiy\", \"nyohug\", \"vvfpt\", \"lbyazu\", \"sarwago\", \"iccztck\", \"ugsxcw\", \"rpwza\", \"yofmlll\", \"ulhdzhg\",\n \"lbaqk\", \"bwxxwc\", \"dmsbawg\", \"tjloy\", \"imbrkul\", \"xguke\", \"shlkuq\", \"lizjcdu\", \"kmvykl\", \"ilqxxjm\", \"rtbvvqt\",\n \"qisec\", \"zobzr\", \"thwntt\", \"afpifh\", \"uwiiovy\", \"hgsyecl\", \"pdgnm\", \"mqyesch\", \"suexztu\", \"msguuwu\", \"yrykkv\",\n \"xtoommc\", \"muteu\", \"bamml\", \"kkhlb\", \"jfrnx\", \"wpytor\", \"zzogpt\", \"yryxxt\", \"hzqofjd\", \"ehtildc\", \"ptclf\",\n \"nyltvd\", \"nrret\", \"qqqqt\", \"uuxunf\", \"jajxt\", \"lzdvlc\", \"gpdtjug\", \"hjsso\", \"jairua\", \"qarxuey\", \"rpwwjwv\",\n \"cjqypep\", \"tuzgcs\", \"oytqxb\", \"rgfmud\", \"stnwn\", \"tzzaop\", \"jpuopzg\", \"qeywd\", \"spnstrg\", \"dfwgntg\", \"yjyqk\",\n \"ioowc\", \"duqfg\", \"gmqxe\", \"xhlbby\", \"liurjk\", \"vdujfm\", \"xxyyn\", \"omapgc\", \"koemzbz\", \"ziiyako\", \"pjmhfrv\",\n \"bshtfgj\", \"ihjvt\", \"pnipuw\", \"fajiuj\", \"rdvcqzd\", \"mgknns\", \"ouwkm\", \"ejnklwc\", \"osepl\", \"gplpyvs\", \"paxrddg\",\n \"gsjlpd\", \"lgnmgl\", \"yifeeer\", \"hhnwlol\", \"fcmxs\", \"ilinwgm\", \"udhfdtq\", \"ceefc\", \"xweqx\", \"jfelwod\",\n \"rtywfjo\", \"kzwrgqx\", \"fcjriov\", \"fzytqv\", \"zcpcddo\", \"scpyzow\", \"kbzegu\", \"gclwr\", \"gmiwlp\", \"rtpka\",\n \"yiywuyy\", \"qceot\", \"dtrgn\", \"ntwbu\", \"fxobd\", \"zmxwza\", \"qcksyz\", \"wgbtmm\", \"pzorve\", \"hztydc\", \"jqlay\",\n \"ijdkbk\", \"uzjrps\", \"gfzibk\", \"gsxqj\", \"kgjrkdd\", \"smdeuk\", \"iwizewp\", \"owjie\", \"kcdccu\", \"ifltqr\", \"zrdfbm\",\n \"pznbcsk\", \"mtkpi\", \"cpasir\", \"flrxrm\", \"uxcxnv\", \"htlfcp\", \"ltukxfr\", \"ftbbha\", \"jhgjgyz\", \"qjreroc\",\n \"vcvtbid\", \"nrhlq\", \"gtkpot\", \"gyplqqg\", \"lnorig\", \"fixhufv\", \"ugcug\", \"ndfug\", \"wuorhe\", \"owocnkw\", \"rcnbf\",\n \"ioiiiui\", \"kakwtne\", \"svxtt\", \"wdrxogm\", \"ibrxs\", \"bddqi\", \"jeguac\", \"hlftdw\", \"nutgfjw\", 
\"krrzvf\", \"amxuloc\",\n \"deozdoe\", \"ovsvk\", \"sfqsl\", \"slgiw\", \"jbjujag\", \"mhiru\", \"uqksech\", \"davosw\", \"nlueljv\", \"rhtvdu\", \"ivdpdqa\",\n \"qnbenpq\", \"dtapqq\", \"hwwfpxl\", \"oyrfosn\", \"goxgmgo\", \"tbvutl\", \"cbbbcm\", \"iiugpk\", \"hinkem\", \"vvaitk\",\n \"pskyf\", \"hdnekg\", \"nqhfn\", \"dqbozx\", \"zcwpko\", \"kafyu\", \"jfegubk\", \"nofqzsk\", \"ujmxxg\", \"akwzemu\", \"yvhxb\",\n \"qqlwofi\", \"hmoecj\", \"qwgtlc\", \"jepvygq\", \"uzggm\", \"fztiews\", \"lvndvf\", \"vulax\", \"znqudh\", \"whgqi\", \"noguo\",\n \"vewkx\", \"uruvgf\", \"ubohmba\", \"aulzi\", \"flvfdlq\", \"yspfie\", \"wugif\", \"qndyiwa\", \"keihmct\", \"rggvn\", \"ojjmuoh\",\n \"sbbcl\", \"cdivmoz\", \"vkusmp\", \"mfddp\", \"kgohwvp\", \"rjbbxw\", \"vsgptj\", \"hbyjoz\", \"gufrv\", \"orxiv\", \"fxcqfw\",\n \"okppik\", \"qlouw\", \"lkryigo\", \"qccvc\", \"ixcnodg\", \"wlfilts\", \"ahqtevp\", \"kkbuha\", \"oehaez\", \"rzczib\", \"vxobk\",\n \"wmetvjs\", \"xfjgeq\", \"eadzl\", \"aeqdvch\", \"czojfq\", \"hxshidl\", \"ofswsj\", \"iwbqcmg\", \"schhwtt\", \"ltyth\", \"wiccu\",\n \"akill\", \"zaaji\", \"qepvfa\", \"mpvrkeu\", \"dcpenm\", \"wdhlk\", \"llqbby\", \"lronwkr\", \"rwtguo\", \"ofnvs\", \"lxdnwzf\",\n \"dctmilf\", \"zhckjd\", \"hajsuac\", \"wpylhy\", \"zhipvm\", \"ihikr\", \"zzwjgvr\", \"gdglrn\", \"skhow\", \"tlqtjl\", \"uypli\",\n \"evdva\", \"civide\", \"iroihm\", \"lvuzid\", \"vexat\", \"ngmvrz\", \"szdhbt\", \"ggrbz\", \"bsmovlt\", \"kguomvl\", \"onzvx\",\n \"nobgxw\", \"tqxemc\", \"vbiyx\", \"fpzpf\", \"ogtvf\", \"yuthri\", \"xszbn\", \"xcuhj\", \"nosnpbp\", \"mowsxg\", \"tfalyy\",\n \"kxombgm\", \"cukrz\", \"krmseq\", \"velzh\", \"kmufxj\", \"nvxlkq\", \"ualvras\", \"wytoucy\", \"qicqyym\", \"pbeujtv\",\n \"haojnbm\", \"xnfffpe\", \"wvoiald\", \"rlyvf\", \"sxamoxw\", \"ztqnmp\", \"biiavx\", \"lnjnzs\", \"arqdjdy\", \"pkrgokc\",\n \"qxswouj\", \"dgqah\", \"mnhzo\", \"ggilb\", \"qscrd\", \"ggvkimw\", \"qlxjys\", \"wximi\", \"aqlhio\", \"iavtvy\", \"grkqf\",\n \"dwrtut\", \"uozutfc\", \"fogxpdb\", \"ydtntlq\", \"vnmpmwp\", \"gtxhwq\", \"mlpihx\", \"yfpjlz\", \"hdvcquq\", \"nunny\",\n \"wklasgp\", \"wxduo\", \"topsqf\", \"tngcpzc\", \"mcrut\", \"pdnsmt\", \"kavaok\", \"seiqsqa\", \"bhgkiyt\", \"mawvhtp\",\n \"domcnrm\", \"fgusghc\", \"wdaufwz\", \"tzpuks\", \"kisndyz\", \"fwyieu\", \"wtdum\", \"ytxhl\", \"yhzkmuv\", \"nppnqe\", \"ccvhj\",\n \"dautnyq\", \"hkaliab\", \"kngan\", \"ebmhiop\", \"vsdkcef\", \"nmpcnd\", \"vxvnl\", \"cwcgu\", \"zsuneh\", \"qjgcmd\", \"awvba\",\n \"rzbisxo\", \"oilqrj\", \"neiazlm\", \"hlyrl\", \"tmiht\", \"lwqxxv\", \"gyblrw\", \"gnnjkb\", \"lrxiln\", \"xlwlseh\", \"npfwcvp\",\n \"yjcdhw\", \"rzndd\", \"orlhmip\", \"gatuojh\", \"osotgvv\", \"owksz\", \"kcocizf\", \"izlev\", \"smigns\", \"wtxfwo\", \"knwizte\",\n \"mqjojzp\", \"lkezye\", \"xqldbu\", \"cvbpyl\", \"aoipbz\", \"asrupt\", \"bdwkesh\", \"jpaykm\", \"pksbg\", \"gdbsibd\", \"lfxpwk\",\n \"rmnfph\", \"yzxwke\", \"xjwyusv\", \"yetar\", \"sytdz\", \"pnystzi\", \"yntcqo\", \"egoorl\", \"aydxu\", \"rfdrfhe\", \"flzkos\",\n \"mmjgev\", \"fbjwmvi\", \"jeouc\", \"lcmkri\", \"aggsb\", \"aaeazai\", \"amyxpey\", \"onxqpg\", \"qrjpxq\", \"zanea\", \"niwsgtv\",\n \"nsqja\", \"utgskd\", \"hlcum\", \"frygtl\", \"xjmqetz\", \"upqddd\", \"vxzdstm\", \"hcmtera\", \"ejstou\", \"xkcguf\", \"bokigdk\",\n \"vurnv\", \"zsgrje\", \"nbxlf\", \"tpilcx\", \"lvepux\", \"xacdtp\", \"amdgx\", \"ubbvnx\", \"xmvznh\", \"tlprri\", \"sthkn\",\n \"xhoad\", \"deotaxo\", \"pqzppmw\", \"xlcpx\", \"qwzrpyp\", \"lujabeb\", \"heskwyy\", 
\"mzzaaur\", \"vnestcs\", \"rryphdl\",\n \"ibdiabi\", \"eoiyt\", \"znflx\", \"clougix\", \"zzadxw\", \"lrrgtf\", \"lsdoakf\", \"yxfmqx\", \"qhnrry\", \"ktcdmv\", \"veygqu\",\n \"btjlo\", \"fcspsc\", \"gozoazm\", \"xcsqgz\", \"aazae\", \"nkuvask\", \"mzdgjq\", \"sihqdhy\", \"zadrwzw\", \"gzcyuea\",\n \"lpgccic\", \"fqtfuzw\", \"bjoqpkc\", \"oydpkxc\", \"sugnnu\", \"hyvygf\", \"axkxo\", \"rsmzb\", \"dlhqmac\", \"gbqby\", \"npqkj\",\n \"odbtb\", \"bdsib\", \"zyasxv\", \"ifxqcc\", \"lmnjwhr\", \"ibuyu\", \"uzhle\", \"ccpwhjr\", \"vhrojnz\", \"fkzfz\", \"fyesm\",\n \"dnvipvm\", \"jbbqn\", \"qdkgl\", \"xkvvgq\", \"dphugaf\", \"soxbfun\", \"rbgokx\", \"biveiz\", \"vbaqtn\", \"qapydgf\", \"llldu\",\n \"ottjpzu\", \"fwjuc\", \"cawio\", \"gbkwe\", \"rrnnxer\", \"luviy\", \"zsalse\", \"ckwdeox\", \"ozhqocm\", \"vtozfwz\", \"jztole\",\n \"ydqei\", \"bfugz\", \"psawjp\", \"dzlyrwp\", \"izuyrne\", \"rbwcfr\", \"vdvte\", \"usjbqs\", \"zzovkxr\", \"frfkwk\", \"mmtmdd\",\n \"sntka\", \"wachbzo\", \"rmzvj\", \"scbngo\", \"eqiuiwi\", \"qfakk\", \"cckcmt\", \"owhzow\", \"rejdlw\", \"iprsqdq\", \"twwaldw\",\n \"mfilzyk\", \"jygvx\", \"iewbo\", \"irhko\", \"zpazqhn\", \"ndqbg\", \"ayzxqdz\", \"zvpbh\", \"maapq\", \"pzitrfm\", \"qsgsurv\",\n \"viwcfff\", \"wpgenms\", \"tjmvu\", \"czuemc\", \"infxoo\", \"avhbw\", \"nugkqx\", \"xubakjp\", \"ndask\", \"utaqq\", \"njhuxq\",\n \"sdvuex\", \"tfmxqp\", \"bydovjo\", \"bizxjsp\", \"zoozxyv\", \"jegei\", \"gkpqobw\", \"psumbtg\", \"gkgoh\", \"sgcbpql\",\n \"xxkhy\", \"kdorkr\", \"hcomj\", \"ulrpyv\", \"rhplil\", \"tyyochd\", \"xhzul\", \"srdjmns\", \"kgukye\", \"yepvs\", \"xnobsjb\",\n \"umxmtub\", \"wvqasr\", \"igftpzw\", \"exhecn\", \"rreee\", \"jpxuvxh\", \"jriqf\", \"akexunb\", \"ekvdsoe\", \"ytzvj\",\n \"vfrlyae\", \"pmfai\", \"biouzle\", \"xkbce\", \"clzyi\", \"xhjoso\", \"wmxkxb\", \"dqzzig\", \"ydtby\", \"gskwj\", \"wlkwbz\",\n \"zepvllz\", \"zsgqp\", \"blntawk\", \"eynmil\", \"bdqyp\", \"wgtnqbc\", \"rrgaq\", \"gtafuzo\", \"qdiko\", \"kkcsdo\", \"zwqhs\",\n \"kugzbmf\", \"wtvvs\", \"kqsdx\", \"mxsuxiz\", \"pgbgjfe\", \"vodfr\", \"qbvwu\", \"vfwbhgw\", \"ayojye\", \"kolzfqg\", \"xnbecj\",\n \"akbcnf\", \"uutrn\", \"upmesa\", \"marqej\", \"bbucee\", \"bazqbau\", \"qikgsyf\", \"oeayzn\", \"uilxnzr\", \"vpnxknl\",\n \"btgtxgh\", \"vjaav\", \"zaxtzah\", \"msweps\", \"awduwld\", \"gzaep\", \"ngvgc\", \"qpoqdgn\", \"kimndg\", \"qilmmpw\",\n \"oafhlyp\", \"nyelgvw\", \"onymk\", \"feycbc\", \"dhcrx\", \"siqpfly\", \"tyvycmf\", \"huctqp\", \"uscjrp\", \"bbptd\", \"msdmu\",\n \"xlxhye\", \"xnyzcox\", \"kyskda\", \"injdkmp\", \"jiwus\", \"spjylwd\", \"eqcrnt\", \"snfiu\", \"jvwvge\", \"yfeaw\", \"mmdnsjj\",\n \"suzdw\", \"xiupf\", \"rjwjhng\", \"tqvasy\", \"rmibpa\", \"zuqax\", \"prpndnp\", \"efryqe\", \"pwuqfy\", \"wpqlfs\", \"aeswq\",\n \"cxkeiue\", \"jydxzfi\", \"tzfvwp\", \"zzgtw\", \"mupiusx\", \"sojavt\", \"dxmsgq\", \"migjiyj\", \"kixjk\", \"ywwvcpl\",\n \"khzcuo\", \"oykhx\", \"fochin\", \"foxbfkc\", \"sizjg\", \"wrjcvr\", \"ceadd\", \"tvfqgxq\", \"whzhche\", \"dcoeti\", \"mpilfib\",\n \"cphie\", \"ucpnjm\", \"ajltvx\", \"kpizym\", \"vevfsrs\", \"jznrri\", \"yvhxomr\", \"cbcnk\", \"yuwuhu\", \"jywuzed\", \"kqakusq\",\n \"jrnzgfo\", \"mjimzz\", \"mfjybnd\", \"ntqyq\", \"junxxck\", \"myvqajv\", \"kvuqs\", \"obfxw\", \"jwuba\", \"vnrvzvy\", \"aeric\",\n \"vtgda\", \"nkrocpt\", \"ahitg\", \"dzxtr\", \"zswwc\", \"yhxap\", \"fdhiwr\", \"cpxtqv\", \"izbmo\", \"zyioo\", \"vysnoe\",\n \"ouuyvj\", \"cumdhzn\", \"dbsmph\", \"cktjem\", \"vbmxy\", \"utgfyhc\", \"rqdeorp\", 
\"btnlmd\", \"chxwlt\", \"nsghoqi\",\n \"egycsm\", \"wkanat\", \"lzjyf\", \"donyx\", \"cchqsa\", \"xozzz\", \"yzmnf\", \"jfzuh\", \"dpcpg\", \"hlahz\", \"vobopk\",\n \"lssfeli\", \"ccttzi\", \"glzgqpv\", \"oyqzug\", \"qqhkrr\", \"euwotv\", \"hwbmtz\", \"hiylhly\", \"bppzne\", \"yetyyvs\",\n \"cnbwcby\", \"hzblk\", \"pfjmxt\", \"dsxvt\", \"vvkju\", \"zjrfr\", \"gdbhb\", \"udoad\", \"nbhpzfm\", \"iwetbym\", \"atmly\",\n \"tnxli\", \"myegb\", \"hiwqsk\", \"btrajk\", \"nhrmwn\", \"ftmbecv\", \"xopht\", \"eiikqy\", \"qizanwa\", \"cwxiatf\", \"jshjva\",\n \"llrtkn\", \"zhivu\", \"lmwiu\", \"oaeaqz\", \"oxotfub\", \"jnkafm\", \"juhrmq\", \"mqzbtw\", \"puiaxty\", \"dnahvoj\", \"gaxhz\",\n \"xfnay\", \"iqmlnlq\", \"xudhcg\", \"izpkz\", \"tqttmt\", \"bwnbs\", \"fdufd\", \"vhzyymh\", \"zhqtxr\", \"evbcrv\", \"xvnma\",\n \"dgcwy\", \"cwxzlbz\", \"oodiol\", \"teyim\", \"kqqfjub\", \"ftsqzi\", \"arfztkr\", \"oqlujx\", \"rpkkdov\", \"ptoff\", \"ivxaxr\",\n \"nxeept\", \"cacpl\", \"tehir\", \"spvggl\", \"qfzxkn\", \"bhwkukx\", \"fkdpuq\", \"xdrngre\", \"fnfplq\", \"dzbrl\", \"ufgxu\",\n \"sciec\", \"fgdydvw\", \"nmpaqxi\", \"ydsvfv\", \"natjz\", \"lruyvzf\", \"xznznxp\", \"mhfrh\", \"kddsk\", \"uwatn\", \"uklzs\",\n \"lnuta\", \"ryizc\", \"cvwko\", \"tnzpk\", \"ywpiv\", \"vbvcagq\", \"pzolw\", \"nmyfhg\", \"cshkofj\", \"ksptw\", \"kqejh\",\n \"zgzjqzo\", \"mxzrw\", \"enabosq\", \"vmubgc\", \"sfzcj\", \"hewvk\", \"ewhrq\", \"oifnsmi\", \"izdnvu\", \"cshgtk\", \"mqotuhd\",\n \"gnqgj\", \"rxailbm\", \"iyhxvtu\", \"ncjzklq\", \"zjmnoc\", \"awqwos\", \"ugujppc\", \"spbvfwl\", \"gntsvo\", \"euksu\",\n \"qnvneph\", \"crhmf\", \"brktmf\", \"mvgmr\", \"yzcskrp\", \"tihawec\", \"edqmxpn\", \"fxyymlr\", \"dzfkucm\", \"prldz\",\n \"gplrlhz\", \"bohwr\", \"bhebbk\", \"mmecj\", \"segydd\", \"ptslsb\", \"pyhgw\", \"cwmrq\", \"mjfhflh\", \"xhuid\", \"npxmb\",\n \"izilq\", \"dczhqh\", \"tgfnxtb\", \"zrylvo\", \"lctxrar\", \"ylhrbii\", \"rfxedv\", \"llvhzjq\", \"bjocv\", \"wbnex\", \"cnohnf\",\n \"xahrl\", \"rouvwyc\", \"hbhovgv\", \"dhucp\", \"ncmff\", \"ncsskg\", \"gsjbyin\", \"lroxscf\", \"whfaenl\", \"vsfultg\",\n \"floxkpy\", \"captoai\", \"qwolyex\", \"ggaypn\", \"wzunypd\", \"pjixeu\", \"gxnjkoc\", \"pqiqhn\", \"xakjmgz\", \"vqizkx\",\n \"gdzcxr\", \"kyxwdd\", \"pgxmazn\", \"qeuwf\", \"bduknm\", \"tcrcn\", \"nehgee\", \"wktbcgu\", \"jwqltdt\", \"wczkai\", \"drkqs\",\n \"qhdqnn\", \"oobxirc\", \"lbunv\", \"ifscr\", \"xnfpbrw\", \"yrrdbax\", \"fbocs\", \"tewne\", \"iobixe\", \"zgosas\", \"yhesn\",\n \"xlqwd\", \"pfcen\", \"slsjffx\", \"ilwatrc\", \"mhsmgp\", \"iteghl\", \"aqhufdl\", \"kxgpqcu\", \"ryrcgp\", \"azidf\", \"smlnl\",\n \"rocxvbt\", \"iutfc\", \"loapgbr\", \"musulp\", \"dqcnj\", \"tpgbkfh\", \"wvskii\", \"itkfopo\", \"kytyb\", \"rzahbu\", \"aewptd\",\n \"ohergbb\", \"cadxh\", \"aphwelj\", \"huooyzn\", \"gtttia\", \"izeyhcr\", \"cfvxz\", \"aitaxyp\", \"vypqost\", \"ebfnmif\",\n \"kgiucm\", \"zryyu\", \"oxgnbpt\", \"frpwo\", \"ouqvodl\", \"pdaazh\", \"gxwmf\", \"dozxsjm\", \"yndpsik\", \"zcwvu\", \"mihug\",\n \"jgodklw\", \"ysklw\", \"cfxqv\", \"yqvtz\", \"rctnp\", \"xjywa\", \"kpqyw\", \"hhtegzt\", \"rnwbeoi\", \"uyxqum\", \"jahcwbe\",\n \"jzjns\", \"ovwoaz\", \"oqmsrua\", \"natbejl\", \"deffv\", \"okgbr\", \"paqhy\", \"jkafhte\", \"lifsknp\", \"afmskh\", \"oemdro\",\n \"oxuwov\", \"qtyxa\", \"hkpfsm\", \"ulaubn\", \"tciurw\", \"myohwlo\", \"okuiejb\", \"ormoqsb\", \"gmipz\", \"hterzir\", \"ekxzre\",\n \"xkevge\", \"ihenf\", \"nnhzv\", \"eocjmx\", \"upzal\", \"oounfko\", \"myhbwub\", \"fwipva\", \"pkzzvpd\", 
\"nrupm\", \"vluzq\",\n \"fxkoyho\", \"atzktr\", \"aomrp\", \"qwpser\", \"ejagmb\", \"cfigelm\", \"bvanb\", \"cgcgabo\", \"hmjvlqt\", \"hxxocf\", \"ftqaud\",\n \"htuipy\", \"bhwmcn\", \"tgyvaqe\", \"lvuwh\", \"yiabzs\", \"rzzavu\", \"fiubm\", \"uuqsb\", \"riyakuf\", \"psscffd\", \"kvckzr\",\n \"fktmnf\", \"ivzqexi\", \"nhxzm\", \"kffjmb\", \"vdzxv\", \"esago\", \"bfikw\", \"gaiuxmz\", \"volokcm\", \"jypcs\", \"psibvs\",\n \"hxaxklf\", \"lmqwgy\", \"spnbimo\", \"mtihak\", \"xikoiy\", \"rmmtv\", \"phaqgxj\", \"zcuwkhk\", \"emodbyb\", \"ztahsya\",\n \"ieiqm\", \"lfoquh\", \"emznnq\", \"pnhlgut\", \"pgvads\", \"cqsjx\", \"lxnjei\", \"zpque\", \"rdjbiyb\", \"sxedpu\", \"potnqva\",\n \"iirkn\", \"rjmnrxd\", \"ksgcd\", \"waeymnh\", \"tizdz\", \"kproa\", \"wpttygd\", \"lvyze\", \"peewvgm\", \"fwtyzbw\", \"zitkk\",\n \"gfgqr\", \"udgvlz\", \"swqspo\", \"ohhvyq\", \"kgyuau\", \"hcerp\", \"pdomlm\", \"twabkk\", \"zfsea\", \"epiwp\", \"xgycjpt\",\n \"jtkdh\", \"mxmdm\", \"rtkzm\", \"qkacy\", \"nuvdiq\", \"agctak\", \"hypgyh\", \"ewtjp\", \"paysolw\", \"bcutebe\", \"xelxyb\",\n \"gzdvrth\", \"vpzfv\", \"cxrkt\", \"admiyzi\", \"lqlmn\", \"zbjpbg\", \"tlvdnli\", \"zetnox\", \"ylcsobo\", \"balajod\", \"igoume\",\n \"sxcgw\", \"sbkkafk\", \"fmndnnw\", \"incsa\", \"jyupkg\", \"uhvvc\", \"rswnbth\", \"nvprfj\", \"figqf\", \"znyidqi\", \"aijper\",\n \"euidr\", \"dftxkze\", \"vnppi\", \"splwifc\", \"fprgafl\", \"ixzaz\", \"mrhqtne\", \"dtkjsy\", \"dsmqrgy\", \"xfscz\", \"cymvmpu\",\n \"vptkfdx\", \"zrgrjq\", \"mqvwsur\", \"hdtlw\", \"ugdpwun\", \"cvxitc\", \"vytvqg\", \"pmtpfz\", \"nfdtdt\", \"umvwjuc\", \"jouxc\",\n \"qpypri\", \"pdhqp\", \"lmise\", \"wlsvcfg\", \"aqdkzcb\", \"qlrmrfz\", \"pbgoyi\", \"xmsskoh\", \"jjdye\", \"xvsdmq\", \"ymjeipy\",\n \"igjyv\", \"uiojvmc\", \"uckoww\", \"grlnyeg\", \"hpglp\", \"omnnyy\", \"iiliir\", \"cnucbcx\", \"pcxvs\", \"hipad\", \"xmiltkj\",\n \"oorwi\", \"qgoxjj\", \"jnmviqs\", \"wpleqn\", \"tudxw\", \"pcogem\", \"hgewaf\", \"niwfexy\", \"vcttgcb\", \"anjgovq\",\n \"epgmscd\", \"mdtru\", \"xvapv\", \"rydjik\", \"kopppcr\", \"mjbsmu\", \"unxoakz\", \"ldpsw\", \"frksjr\", \"vyxxg\", \"yyydri\",\n \"szidq\", \"qvbtd\", \"qratl\", \"xwfov\", \"bzhqyxl\", \"fskrtf\", \"pcpzmnv\", \"xuxwx\", \"vzbevnb\", \"ebaqz\", \"dbpuek\",\n \"ooqwj\", \"gaimp\", \"coelqh\", \"bwuceq\", \"oxpfjt\", \"zrqyc\", \"rwllk\", \"pqunv\", \"ufbnn\", \"tbnjoz\", \"kkqmrxu\",\n \"qyyrm\", \"hislf\", \"wyuck\", \"ubpre\", \"pdioi\", \"aryhv\", \"vdcxv\", \"rkgmaag\", \"czlzokw\", \"gtxuduz\", \"grpijx\",\n \"qzrar\", \"qhues\", \"rmznt\", \"sxxmved\", \"onjzuwl\", \"atbjhip\", \"nrardl\", \"alrocy\", \"cfkip\", \"ihtbf\", \"pqdgm\",\n \"hmokun\", \"dpghac\", \"otwml\", \"mnbzwa\", \"ehetlt\", \"rchvq\", \"lwjgywn\", \"lzdmjo\", \"nvhohdp\", \"tmshcpc\", \"gavjv\",\n \"ycnkv\", \"uynzh\", \"bvpnfjq\", \"lfbem\", \"qberui\", \"vrmmhx\", \"wpbqtfq\", \"jujpx\", \"dujgkof\", \"hrpbso\", \"zhcdt\",\n \"iybngyb\", \"rgeruza\", \"nesyxr\", \"cihgfe\", \"hjgskb\", \"zspxeqm\", \"inzrgyd\", \"crkjq\", \"iooshwp\", \"muvvj\", \"wakis\",\n \"rowibwa\", \"qikwypf\", \"aportho\", \"pubcgx\", \"vqoqpfi\", \"rnpbri\", \"ussjv\", \"looor\", \"xkzvdv\", \"tstegg\",\n \"zgiiokw\", \"rwvyaun\", \"mqqla\", \"asnqp\", \"nghuryl\", \"hlvhn\", \"ecuotnu\", \"judvbu\", \"xgvuw\", \"oeckn\", \"hdhttsg\",\n \"hcyhu\", \"klbyjc\", \"tnrmqnc\", \"mjojxhi\", \"kvdet\", \"vbmevim\", \"oglrzs\", \"afbscdi\", \"zxrffti\", \"firzgmz\",\n \"oenim\", \"wgpua\", \"asiep\", \"kyteq\", \"wpeneca\", \"qixmeoq\", \"zaofon\", \"csxxtr\", 
\"cpwmnl\", \"feylas\", \"idjuo\",\n \"mrtpvta\", \"jjvmjy\", \"mnljocc\", \"lnvjleq\", \"oognud\", \"rbyneq\", \"rhvomm\", \"fldrkpk\", \"znvrp\", \"myswmz\", \"jiloe\",\n \"juivjmo\", \"ylhbyzl\", \"ndmabkt\", \"sgdvlq\", \"pmnddmi\", \"utpuj\", \"kfisv\", \"nxfeell\", \"mxhgqd\", \"ccvdsdg\",\n \"emtybo\", \"zmkylbt\", \"mmrpi\", \"dkwlgq\", \"iwlappb\", \"uimsrnu\", \"mkxaxmi\", \"tcvll\", \"njggal\", \"kmqud\", \"evgzlh\",\n \"oaxizbp\", \"jiuej\", \"xknlp\", \"cyksydh\", \"gbixmz\", \"vtouyk\", \"sxjpkio\", \"qhubt\", \"kflvnb\", \"sjdfggl\", \"bxozyj\",\n \"xekbh\", \"wtmcb\", \"xtapfco\", \"rnornl\", \"ursdpki\", \"waonim\", \"eibfyed\", \"zniinaz\", \"uyfohq\", \"qcaxlt\",\n \"koyaapa\", \"pjuvbsi\", \"ecpdl\", \"ifaqwm\", \"yyumzc\", \"gvfngfp\", \"lttul\", \"flyza\", \"uasdlme\", \"oklhb\", \"wulkzzv\",\n \"ziwsxo\", \"jqcxiu\", \"qdzrwgm\", \"zjdwy\", \"uumns\", \"emlnp\", \"irnrqp\", \"gqkza\", \"oynpcz\", \"yxyea\", \"zpamf\",\n \"gyehxbv\", \"nplkhcc\", \"rxeekyo\", \"kecgp\", \"gseju\", \"nkisxqf\", \"vlyud\", \"fxxihhm\", \"yjgtml\", \"fehwpdi\",\n \"wclnvyy\", \"lriwrc\", \"ikparv\", \"volfh\", \"ysphh\", \"szrvrv\", \"rqlmz\", \"jyqut\", \"fyftsj\", \"uvwfip\", \"rngwgm\",\n \"mjwaz\", \"roehjki\", \"ploxokr\", \"yjbalp\", \"fspkq\", \"yfxrb\", \"kzulvk\", \"ordxp\", \"vdrrt\", \"wdiojwd\", \"ridzl\",\n \"niykdvu\", \"whyycmn\", \"riwcma\", \"bkhgkrb\", \"nsine\", \"emgtgf\", \"zoymw\", \"ljtvhzb\", \"kfyfdma\", \"piygxdl\",\n \"onfwgdf\", \"fwmkm\", \"vqbljay\", \"icife\", \"bxfli\", \"yeygr\", \"qenhgm\", \"mtxuckj\", \"kdcyx\", \"kwqhfcn\", \"ywkfy\",\n \"prbpw\", \"pheyc\", \"kmnds\", \"cacqs\", \"kvekiqy\", \"bfvfhdy\", \"gxulp\", \"skmcra\", \"exomt\", \"lcxue\", \"mnvvday\",\n \"rsddl\", \"gooegc\", \"udght\", \"doymnin\", \"ccdap\", \"wuive\", \"dyyln\", \"rynust\", \"luxabyg\", \"kdkkyyw\", \"vawqfsy\",\n \"rmeswm\", \"rcxzyv\", \"clpowz\", \"pdntqm\", \"tvjkkmz\", \"iiclw\", \"nhudzen\", \"cybhu\", \"crwtw\", \"enypnh\", \"ygekg\",\n \"hrjwqt\", \"peissge\", \"wangcy\", \"rbpoik\", \"raqulbf\", \"gyisnsj\", \"rgbqn\", \"lgvuzb\", \"djicf\", \"epnuu\", \"nsapc\",\n \"voatgh\", \"yorfehc\", \"jxfttat\", \"wyuivb\", \"bwopl\", \"odwdsh\", \"anchkv\", \"sepvew\", \"qoxxmae\", \"bpvqnj\", \"sngfo\",\n \"buoazou\", \"zhijssa\", \"janng\", \"uvdbd\", \"yfvkqo\", \"lcjii\", \"mvacvrz\", \"xztiar\", \"lpbtrqa\", \"ukbpdx\", \"okaqpgr\",\n \"idgqlj\", \"ewglgo\", \"ruymhi\", \"pcidw\", \"bvuqj\", \"npzch\", \"yppyan\", \"oiguirj\", \"iijvwqj\", \"jvbwjys\", \"yjtunfc\",\n \"iaikra\", \"oduhdgk\", \"ivixur\", \"ibcgai\", \"djzvcbx\", \"lmtsul\", \"lgnwzol\", \"wursq\", \"xsxbqwq\", \"jqvwnc\",\n \"dcwwvtb\", \"vwybnr\", \"bughwjl\", \"rnelxb\", \"hmacv\", \"ufgdygl\", \"aabuat\", \"oynwask\", \"gnfjjf\", \"zipbq\", \"zxstn\",\n \"jdrbprf\", \"jmkvny\", \"rblpql\", \"vykdj\", \"qaakyqw\", \"osbhddb\", \"avgldyy\", \"kvpoa\", \"fnqcliu\", \"zzlninw\",\n \"drsal\", \"omswys\", \"hwqcpct\", \"ecraq\", \"fvhsbjq\", \"raauy\", \"pfmoz\", \"vvqvcm\", \"tbjqjun\", \"jcfbegq\", \"otiwup\",\n \"axvvce\", \"dhpdnx\", \"pennr\", \"hvvmvzv\", \"binezl\", \"ygdmcuo\", \"ypwnqn\", \"aloxdv\", \"ucieh\", \"kovbtag\", \"rgfpaww\",\n \"fpbftg\", \"spjowfr\", \"zridoy\", \"blwbbf\", \"evwlxi\", \"itbcz\", \"hgixuo\", \"qmoqmjb\", \"tkeeis\", \"pjiaq\", \"rbpje\",\n \"ledoui\", \"ubecht\", \"mphdd\", \"uzswsbb\", \"ntsybr\", \"qmnijyp\", \"pqwawe\", \"ltytill\", \"dpnxy\", \"pkxqcol\", \"ayrdi\",\n \"mycnd\", \"knotsn\", \"zvcrjl\", \"qwroblg\", \"vtrktey\", \"dzilezi\", \"wzkxg\", 
\"varqc\", \"xlpttyc\", \"xxqhnl\", \"jpxywa\",\n \"kjdsh\", \"hdseebw\", \"bxqbp\", \"flazqce\", \"xrtab\", \"rupsfq\", \"asswer\", \"rhqof\", \"hjzdv\", \"addsgax\", \"cuahzjj\",\n \"xwdilr\", \"osqgg\", \"pfhwv\", \"rqorah\", \"ggdlnv\", \"truvaoj\", \"jzuldwf\", \"mjddj\", \"vixtn\", \"eslxoaj\", \"cmoypm\",\n \"jvvzs\", \"oqgxcc\", \"tptls\", \"wwgwbj\", \"tysuhg\", \"xbnqb\", \"iogjvg\", \"fbxdmr\", \"zdvsmx\", \"hiuja\", \"watrt\",\n \"kjawab\", \"entxk\", \"jmnkaox\", \"zznsox\", \"asmzc\", \"soblvp\", \"quyxjw\", \"udrdc\", \"hyylvvw\", \"gzfwxuv\", \"jjqmjw\",\n \"faegxbl\", \"lqjcg\", \"bzmruq\", \"bykuh\", \"miwhd\", \"ykgtwhk\", \"oyobzwi\", \"oltwpua\", \"ctulabr\", \"dwandd\", \"vhuhox\",\n \"vtlknw\", \"ywvln\", \"qemqdeg\", \"akezvx\", \"kjmjpv\", \"vwuftx\", \"kreaxnj\", \"fvfop\", \"cxabs\", \"jfacbje\", \"eecnz\",\n \"cmblit\", \"gfvpoq\", \"whywnh\", \"pghvx\", \"ohgkmf\", \"xxtiwd\", \"nkojni\", \"dlcicnp\", \"bwyvyyd\", \"gifup\", \"vgjfr\",\n \"hhteifi\", \"kjhffq\", \"pawqaxl\", \"yozro\", \"slxluvd\", \"amqcquy\", \"vnnxkr\", \"wgdur\", \"rvawiu\", \"thcwnc\", \"cddut\",\n \"vnrtrv\", \"fnfio\", \"nhvxe\", \"rfdqmj\", \"ucblh\", \"ccbnt\", \"lxckaoy\", \"fnwcbx\", \"gmdbiwt\", \"ypvwjy\", \"cbjazk\",\n \"qmujnm\", \"nsqot\", \"lhcqt\", \"ijxcts\", \"nujrms\", \"itxel\", \"ghukr\", \"qpwitlr\", \"gcafqrn\", \"lcoho\", \"lfzab\",\n \"vwhgceb\", \"vgsgy\", \"jrtgo\", \"ryxlz\", \"deoyq\", \"ybenly\", \"lyysca\", \"sodvazo\", \"hbnnoz\", \"ovgvda\", \"elwtjx\",\n \"soydmn\", \"trdsi\", \"mwwjwo\", \"vupwj\", \"dszpcv\", \"kkhjdj\", \"ewmyo\", \"nmpeq\", \"oepldcq\", \"xttrgu\", \"wbcbxi\",\n \"jakzk\", \"peukyw\", \"fvcqv\", \"xklwuu\", \"hsmva\", \"kslmkq\", \"azllbig\", \"stnzih\", \"wfyud\", \"ihauy\", \"cfxmj\",\n \"pdyogwv\", \"dcqdpa\", \"xhusy\", \"jfpmpmm\", \"odeiiw\", \"ozyaer\", \"uykzvma\", \"tuaznxj\", \"kdnbdki\", \"syrnsem\",\n \"fdysz\", \"hhrpo\", \"fglzfi\", \"vgcqzqm\", \"qhsjr\", \"bvboe\", \"dpfwpvg\", \"mvvry\", \"itnnr\", \"lgykbe\", \"pscow\",\n \"mkrgeqv\", \"czffv\", \"apteht\", \"jeqixsx\", \"ksmbe\", \"zamivv\", \"vvmyo\", \"cwwoce\", \"sppubxc\", \"qaich\", \"nmbxr\",\n \"tfkwfxi\", \"iakhezl\", \"fxujis\", \"fkwffe\", \"antaylq\", \"mmfgstq\", \"zxaacy\", \"zlswx\", \"pbqxil\", \"eupck\",\n \"qzcxpbe\", \"rjalbzr\", \"wioagbq\", \"kreec\", \"zsdcuft\", \"rrdzb\", \"ocdlvq\", \"oxiroo\", \"zcxsqh\", \"wbrsi\", \"fqike\",\n \"oskzupi\", \"thvof\", \"dicbyst\", \"iojwe\", \"hyfizq\", \"yoknhww\", \"nupiyyn\", \"ievah\", \"slcgmxg\", \"cnecpa\", \"lcwsoj\",\n \"hnqsc\", \"ghipbi\", \"exobr\", \"nwpnq\", \"dmhbj\", \"amdbmwl\", \"xfbzovs\", \"puizvu\", \"yvsus\", \"ykysqg\", \"bgqdv\",\n \"zgqbr\", \"zkjpkej\", \"crkot\", \"zciymk\", \"tleogn\", \"sayrmz\", \"elwma\", \"zugjva\", \"uifwsmw\", \"wstrg\", \"xbotd\",\n \"hinsg\", \"qpgyoyp\", \"xzfocdy\", \"mbvuepb\", \"dtphufk\", \"cyapnt\", \"yyehhad\", \"ohdrd\", \"mlibm\", \"qzdfil\",\n \"rdwszqx\", \"bzcbmyn\", \"uarjlg\", \"mtwpqmx\", \"nmagl\", \"cepniel\", \"tylvaa\", \"melhd\", \"jygeneg\", \"fdglfy\",\n \"xcpciu\", \"ayrel\", \"bxceshv\", \"kspyg\", \"iclkaz\", \"ykbzt\", \"nrnkzo\", \"kxkto\", \"fabzszn\", \"edalls\", \"nilmh\",\n \"wwawgnn\", \"gymbtx\", \"mzipa\", \"ajevx\", \"qppisv\", \"otqhsf\", \"ippxak\", \"bixnqd\", \"uqitwo\", \"soxcug\", \"loiscd\",\n \"wqrjk\", \"rqntoa\", \"fzpxlp\", \"tuaob\", \"pyqqms\", \"krbzmmj\", \"aijqpfg\", \"nstqrbu\", \"wmtiahz\", \"joplby\", \"jyszxq\",\n \"jnxtyhe\", \"lbvfv\"]) == 14011\n", "step-ids": [ 5, 7, 8, 10, 12 ] }
[ 5, 7, 8, 10, 12 ]
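The assertions in the record above exercise minimumLengthEncoding (the "Short Encoding of Words" problem): the answer is the length of a reference string built by concatenating every word that is not a suffix of another word, each followed by a '#'. The record does not show the Solution class itself, so the following is only a minimal sketch of one common suffix-set approach, assuming the same class and method names used by the assertions:

class Solution:
    def minimumLengthEncoding(self, words: list[str]) -> int:
        # Keep only words that are not a proper suffix of some other word.
        good = set(words)
        for word in words:
            for k in range(1, len(word)):
                good.discard(word[k:])
        # Each surviving word contributes len(word) + 1 for its trailing '#'.
        return sum(len(w) + 1 for w in good)

# Example: Solution().minimumLengthEncoding(["me", "time"]) == 5,
# matching the small assertion in the record above.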
from django.conf.urls import url

from .views.show import show_article, show_articles, export_db

urlpatterns = [
    url(r'^$', show_articles, name='index'),            # article list
    url(r'^article/$', show_article, name='article'),   # single article
    url(r'^export/$', export_db, name='export'),        # database export; named 'export' so reverse() can distinguish it from the article route
]
normal
{ "blob_id": "9fdc7c1eb68a92451d41313861164a915b85fcee", "index": 8988, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [url('^$', show_articles, name='index'), url('^article/$',\n show_article, name='article'), url('^export/$', export_db, name='article')]\n", "step-3": "from django.conf.urls import url\nfrom .views.show import show_article, show_articles, export_db\nurlpatterns = [url('^$', show_articles, name='index'), url('^article/$',\n show_article, name='article'), url('^export/$', export_db, name='article')]\n", "step-4": "from django.conf.urls import url\nfrom .views.show import show_article, show_articles, export_db\n\nurlpatterns = [\n url(r'^$', show_articles, name='index'),\n url(r'^article/$', show_article, name='article'),\n url(r'^export/$', export_db, name='article'),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
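The urls.py record above imports show_article, show_articles, and export_db from .views.show, but the views module itself is not part of the record. A minimal sketch of what such a module could look like follows; only the three function names come from the import, while the Article model, its fields, the template names, and the query parameter are illustrative assumptions:

# views/show.py -- hypothetical sketch; everything except the function names is assumed.
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render

from ..models import Article  # assumed model, not part of the original record


def show_articles(request):
    # List view backing the 'index' URL.
    articles = Article.objects.all()
    return render(request, 'articles/index.html', {'articles': articles})


def show_article(request):
    # Detail view; assumes the article is selected via an ?id=<pk> query parameter.
    article = get_object_or_404(Article, pk=request.GET.get('id'))
    return render(request, 'articles/detail.html', {'article': article})


def export_db(request):
    # Dump the articles table as JSON (field names are assumptions).
    data = list(Article.objects.values('id', 'title', 'body'))
    return JsonResponse(data, safe=False)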