| code (stringlengths 13 to 6.09M) | order_type (stringclasses 2 values) | original_example (dict) | step_ids (listlengths 1 to 5) |
---|---|---|---|
"""added personal collection
Revision ID: 43eabda1d630
Revises: 9cad4dfb5125
Create Date: 2018-03-28 13:55:03.557872
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '43eabda1d630'
down_revision = '9cad4dfb5125'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', ['personal_collections'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'Gifs', type_='foreignkey')
op.drop_column('Gifs', 'personal_collections')
# ### end Alembic commands ###
|
normal
|
{
"blob_id": "21bdf315c98a4cf69482cc7db41bc30d44781596",
"index": 816,
"step-1": "<mask token>\n\n\ndef upgrade():\n op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),\n nullable=True))\n op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [\n 'personal_collections'], ['id'])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef upgrade():\n op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),\n nullable=True))\n op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [\n 'personal_collections'], ['id'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'Gifs', type_='foreignkey')\n op.drop_column('Gifs', 'personal_collections')\n",
"step-3": "<mask token>\nrevision = '43eabda1d630'\ndown_revision = '9cad4dfb5125'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),\n nullable=True))\n op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [\n 'personal_collections'], ['id'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'Gifs', type_='foreignkey')\n op.drop_column('Gifs', 'personal_collections')\n",
"step-4": "<mask token>\nfrom alembic import op\nimport sqlalchemy as sa\nrevision = '43eabda1d630'\ndown_revision = '9cad4dfb5125'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(),\n nullable=True))\n op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', [\n 'personal_collections'], ['id'])\n\n\ndef downgrade():\n op.drop_constraint(None, 'Gifs', type_='foreignkey')\n op.drop_column('Gifs', 'personal_collections')\n",
"step-5": "\"\"\"added personal collection\n\nRevision ID: 43eabda1d630\nRevises: 9cad4dfb5125\nCreate Date: 2018-03-28 13:55:03.557872\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '43eabda1d630'\ndown_revision = '9cad4dfb5125'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('Gifs', sa.Column('personal_collections', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'Gifs', 'PersonalGifCollections', ['personal_collections'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'Gifs', type_='foreignkey')\n op.drop_column('Gifs', 'personal_collections')\n # ### end Alembic commands ###\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(b)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
a = np.array([1, 2, 3])
b = np.r_[np.repeat(a, 3), np.tile(a, 3)]
print(b)
<|reserved_special_token_1|>
import numpy as np
a = np.array([1, 2, 3])
b = np.r_[np.repeat(a, 3), np.tile(a, 3)]
print(b)
|
flexible
|
{
"blob_id": "f39945f35b13c0918c3ef06224bca65ae6166ebc",
"index": 5892,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(b)\n",
"step-3": "<mask token>\na = np.array([1, 2, 3])\nb = np.r_[np.repeat(a, 3), np.tile(a, 3)]\nprint(b)\n",
"step-4": "import numpy as np\na = np.array([1, 2, 3])\nb = np.r_[np.repeat(a, 3), np.tile(a, 3)]\nprint(b)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))
def baselength(ant_ID1, ant_ID2):
"""
(Convenience function)
Return the norm of the baseline between antennae
# @ant_ID1 and @ant_ID2
"""
return np.linalg.norm(baseline(ant_ID1, ant_ID2))
def baseline(ant_ID1, ant_ID2):
"""
Calculate the baseline between antennae
# @ant_ID1 and @ant_ID2
by a simple difference of their coordinates.
"""
return ant_pos[ant_ID2] - ant_pos[ant_ID1]
def phase_factor(ant1, ant2, r, nu=151000000.0):
"""
Calculate the phase factor in the direction @r (l, m)
(we assume that n is of insignificant magnitude)
and at the frequency @nu
between two antennae whose ID #s are @ant1 and @ant2.
When we calculate the baseline (u, v, w), we
assume that w is of insignificant magnitude.
"""
b = baseline(ant1, ant2)[0:2]
br = np.dot(b, r)
return np.exp(-2.0j * np.pi * nu * br / c)
except FileNotFoundError:
print('Failure to load antennae data.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
c = 299792458
data_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'
try:
ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))
def baselength(ant_ID1, ant_ID2):
"""
(Convenience function)
Return the norm of the baseline between antennae
# @ant_ID1 and @ant_ID2
"""
return np.linalg.norm(baseline(ant_ID1, ant_ID2))
def baseline(ant_ID1, ant_ID2):
"""
Calculate the baseline between antennae
# @ant_ID1 and @ant_ID2
by a simple difference of their coordinates.
"""
return ant_pos[ant_ID2] - ant_pos[ant_ID1]
def phase_factor(ant1, ant2, r, nu=151000000.0):
"""
Calculate the phase factor in the direction @r (l, m)
(we assume that n is of insignificant magnitude)
and at the frequency @nu
between two antennae whose ID #s are @ant1 and @ant2.
When we calculate the baseline (u, v, w), we
assume that w is of insignificant magnitude.
"""
b = baseline(ant1, ant2)[0:2]
br = np.dot(b, r)
return np.exp(-2.0j * np.pi * nu * br / c)
except FileNotFoundError:
print('Failure to load antennae data.')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import os
import numpy as np
import pickle
c = 299792458
data_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'
try:
ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))
def baselength(ant_ID1, ant_ID2):
"""
(Convenience function)
Return the norm of the baseline between antennae
# @ant_ID1 and @ant_ID2
"""
return np.linalg.norm(baseline(ant_ID1, ant_ID2))
def baseline(ant_ID1, ant_ID2):
"""
Calculate the baseline between antennae
# @ant_ID1 and @ant_ID2
by a simple difference of their coordinates.
"""
return ant_pos[ant_ID2] - ant_pos[ant_ID1]
def phase_factor(ant1, ant2, r, nu=151000000.0):
"""
Calculate the phase factor in the direction @r (l, m)
(we assume that n is of insignificant magnitude)
and at the frequency @nu
between two antennae whose ID #s are @ant1 and @ant2.
When we calculate the baseline (u, v, w), we
assume that w is of insignificant magnitude.
"""
b = baseline(ant1, ant2)[0:2]
br = np.dot(b, r)
return np.exp(-2.0j * np.pi * nu * br / c)
except FileNotFoundError:
print('Failure to load antennae data.')
<|reserved_special_token_1|>
"""
Utilities for calculations based on antenna positions,
such as baseline and phase factor.
"""
import os
import numpy as np
import pickle
c = 299792458 # m / s
data_prefix = os.path.dirname(os.path.abspath(__file__)) + "/"
try:
ant_pos = dict(pickle.load(open(data_prefix + "ant_dict.pk", "rb")))
def baselength(ant_ID1, ant_ID2):
"""
(Convenience function)
Return the norm of the baseline between antennae
# @ant_ID1 and @ant_ID2
"""
return np.linalg.norm(baseline(ant_ID1, ant_ID2))
def baseline(ant_ID1, ant_ID2):
"""
Calculate the baseline between antennae
# @ant_ID1 and @ant_ID2
by a simple difference of their coordinates.
"""
return ant_pos[ant_ID2] - ant_pos[ant_ID1]
def phase_factor(ant1, ant2, r, nu=151e6):
"""
Calculate the phase factor in the direction @r (l, m)
(we assume that n is of insignificant magnitude)
and at the frequency @nu
between two antennae whose ID #s are @ant1 and @ant2.
When we calculate the baseline (u, v, w), we
assume that w is of insignificant magnitude.
"""
b = baseline(ant1, ant2)[0:2] # kill w
br = np.dot(b, r)
return np.exp(-2j * np.pi * nu * br / c)
except FileNotFoundError:
print("Failure to load antennae data.")
|
flexible
|
{
"blob_id": "c455263b82c04fe2c5cc1e614f10a9962795f87e",
"index": 4349,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-3": "<mask token>\nc = 299792458\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-4": "<mask token>\nimport os\nimport numpy as np\nimport pickle\nc = 299792458\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + '/'\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + 'ant_dict.pk', 'rb')))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151000000.0):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2]\n br = np.dot(b, r)\n return np.exp(-2.0j * np.pi * nu * br / c)\nexcept FileNotFoundError:\n print('Failure to load antennae data.')\n",
"step-5": "\"\"\"\nUtilities for calculations based on antenna positions,\nsuch as baseline and phase factor.\n\"\"\"\n\nimport os\nimport numpy as np\nimport pickle\n\nc = 299792458 # m / s\ndata_prefix = os.path.dirname(os.path.abspath(__file__)) + \"/\"\n\ntry:\n ant_pos = dict(pickle.load(open(data_prefix + \"ant_dict.pk\", \"rb\")))\n\n def baselength(ant_ID1, ant_ID2):\n \"\"\"\n (Convenience function)\n Return the norm of the baseline between antennae\n # @ant_ID1 and @ant_ID2\n \"\"\"\n return np.linalg.norm(baseline(ant_ID1, ant_ID2))\n\n def baseline(ant_ID1, ant_ID2):\n \"\"\"\n Calculate the baseline between antennae\n # @ant_ID1 and @ant_ID2\n by a simple difference of their coordinates.\n \"\"\"\n return ant_pos[ant_ID2] - ant_pos[ant_ID1]\n\n def phase_factor(ant1, ant2, r, nu=151e6):\n \"\"\"\n Calculate the phase factor in the direction @r (l, m)\n (we assume that n is of insignificant magnitude)\n and at the frequency @nu\n between two antennae whose ID #s are @ant1 and @ant2.\n When we calculate the baseline (u, v, w), we\n assume that w is of insignificant magnitude.\n \"\"\"\n b = baseline(ant1, ant2)[0:2] # kill w\n\n br = np.dot(b, r)\n return np.exp(-2j * np.pi * nu * br / c)\n \nexcept FileNotFoundError:\n print(\"Failure to load antennae data.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Game)
admin.site.register(Scrap)
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Game, Scrap
admin.site.register(Game)
admin.site.register(Scrap)
|
flexible
|
{
"blob_id": "7e328992392a4ff2b0e23920a8907e38f63fcff0",
"index": 7168,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Game)\nadmin.site.register(Scrap)\n",
"step-3": "from django.contrib import admin\nfrom .models import Game, Scrap\nadmin.site.register(Game)\nadmin.site.register(Scrap)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import datetime
from flask import Flask, render_template, request
import database
import database1
import database2
import getYoutubeVideoLinks as getYT
import os
os.environ["EAI_USERNAME"] = '[email protected]'
os.environ["EAI_PASSWORD"] = 'Testqwerty1!'
from expertai.nlapi.cloud.client import ExpertAiClient
client = ExpertAiClient()
# Output overall sentiment
app = Flask(__name__)
database.create_tables()
database1.create_table()
database2.create_tablee()
language= 'en'
videos = []
@app.route("/", methods=["GET", "POST"])
def home():
if request.method == "POST":
entry_content = request.form.get("content")
output = client.specific_resource_analysis(body={"document": {"text": entry_content}}, params={'language': language, 'resource': 'relevants'})
database2.create_entryss(entry_content, datetime.datetime.today().strftime("%b %d"))
for lemma in output.main_lemmas:
print(lemma.value)
video = getYT.searchVideoForKeyword(lemma.value)
for indivvideo in video:
database.create_entry(entry_content, datetime.datetime.today().strftime("%b %d"), indivvideo)
videos.append(f'{indivvideo}')
return render_template("home.html")
@app.route("/feedback", methods=["GET", "POST"])
def feedback():
if request.method == "POST":
entry_contents = request.form.get("contents")
output = client.specific_resource_analysis(body={"document": {"text": entry_contents}},params={'language': language, 'resource': 'sentiment'})
database1.create_entrys(entry_contents, datetime.datetime.today().strftime("%b %d"), output.sentiment.overall)
print(output.sentiment.overall)
return render_template("feedback.html")
@app.route("/recommendation", methods=["GET", "POST"])
def recommendation():
return render_template('index.html', videos=videos, entries=database.retrieve_entries(), entrie=database2.retrieve_entriee())
@app.route('/negative', methods=["GET", "POST"])
def negative():
return render_template("negative.html", entries=database1.retrieve_entrie())
@app.route('/positive', methods=["GET", "POST"])
def positive():
return render_template("positive.html", entries=database1.retrieve_entrie())
|
normal
|
{
"blob_id": "d0f2d47a786b85367f96897e7cd8c2ef8c577e2b",
"index": 2961,
"step-1": "<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-2": "<mask token>\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\n<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-3": "<mask token>\nos.environ['EAI_USERNAME'] = '[email protected]'\nos.environ['EAI_PASSWORD'] = 'Testqwerty1!'\n<mask token>\nclient = ExpertAiClient()\napp = Flask(__name__)\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\nlanguage = 'en'\nvideos = []\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-4": "import datetime\nfrom flask import Flask, render_template, request\nimport database\nimport database1\nimport database2\nimport getYoutubeVideoLinks as getYT\nimport os\nos.environ['EAI_USERNAME'] = '[email protected]'\nos.environ['EAI_PASSWORD'] = 'Testqwerty1!'\nfrom expertai.nlapi.cloud.client import ExpertAiClient\nclient = ExpertAiClient()\napp = Flask(__name__)\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\nlanguage = 'en'\nvideos = []\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n",
"step-5": "import datetime\nfrom flask import Flask, render_template, request\nimport database\nimport database1\nimport database2\nimport getYoutubeVideoLinks as getYT\n\nimport os\nos.environ[\"EAI_USERNAME\"] = '[email protected]'\nos.environ[\"EAI_PASSWORD\"] = 'Testqwerty1!'\n\nfrom expertai.nlapi.cloud.client import ExpertAiClient\nclient = ExpertAiClient()\n\n# Output overall sentiment\n\n\napp = Flask(__name__)\n\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\n\nlanguage= 'en'\n\nvideos = []\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n \n if request.method == \"POST\":\n entry_content = request.form.get(\"content\")\n output = client.specific_resource_analysis(body={\"document\": {\"text\": entry_content}}, params={'language': language, 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().strftime(\"%b %d\"))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.today().strftime(\"%b %d\"), indivvideo)\n videos.append(f'{indivvideo}')\n \n return render_template(\"home.html\")\n\n\n\[email protected](\"/feedback\", methods=[\"GET\", \"POST\"])\ndef feedback():\n if request.method == \"POST\":\n entry_contents = request.form.get(\"contents\")\n output = client.specific_resource_analysis(body={\"document\": {\"text\": entry_contents}},params={'language': language, 'resource': 'sentiment'})\n \n database1.create_entrys(entry_contents, datetime.datetime.today().strftime(\"%b %d\"), output.sentiment.overall)\n print(output.sentiment.overall)\n\n return render_template(\"feedback.html\")\n\n\n\n\[email protected](\"/recommendation\", methods=[\"GET\", \"POST\"])\ndef recommendation(): \n return render_template('index.html', videos=videos, entries=database.retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=[\"GET\", \"POST\"])\ndef negative():\n return render_template(\"negative.html\", entries=database1.retrieve_entrie())\n\n\[email protected]('/positive', methods=[\"GET\", \"POST\"])\ndef positive():\n return render_template(\"positive.html\", entries=database1.retrieve_entrie())\n\n\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from knox.models import AuthToken
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer
# Register API
class RegisterAPI(generics.CreateAPIView):
permission_classes = [
permissions.AllowAny
]
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
# Login API
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
class ChangePasswordAPI(generics.UpdateAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = ChangePasswordSerializer
def update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = request.user
user.set_password(serializer.validated_data['new_password'])
user.save()
return Response({
'success': True,
}, status=status.HTTP_200_OK)
# Get User API
class UserAPI(generics.RetrieveUpdateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
def update(self, request, *args, **kwargs):
user = self.get_object()
first_name = request.data.get('first_name')
last_name = request.data.get('last_name')
mobile = request.data.get('mobile')
print(first_name, last_name, mobile)
user.first_name = first_name
user.last_name = last_name
user.mobile = mobile
user.save()
return Response({
"success": False
}, status=status.HTTP_200_OK)
|
normal
|
{
"blob_id": "5d6ec1b23dcbc935fe80dd09a2e967eb7e37a363",
"index": 5645,
"step-1": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n <mask token>\n <mask token>\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-2": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n <mask token>\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-3": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-4": "from knox.models import AuthToken\nfrom rest_framework import generics, permissions, status\nfrom rest_framework.response import Response\nfrom accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer\n\n\nclass RegisterAPI(generics.CreateAPIView):\n permission_classes = [permissions.AllowAny]\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-5": "from knox.models import AuthToken\nfrom rest_framework import generics, permissions, status\nfrom rest_framework.response import Response\n\nfrom accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer\n\n\n# Register API\n\nclass RegisterAPI(generics.CreateAPIView):\n permission_classes = [\n permissions.AllowAny\n ]\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({\n \"user\": UserSerializer(user, context=self.get_serializer_context()).data,\n \"token\": AuthToken.objects.create(user)[1]\n })\n\n\n# Login API\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({\n \"user\": UserSerializer(user, context=self.get_serializer_context()).data,\n \"token\": AuthToken.objects.create(user)[1]\n })\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [\n permissions.IsAuthenticated\n ]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({\n 'success': True,\n }, status=status.HTTP_200_OK)\n\n\n# Get User API\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [\n permissions.IsAuthenticated,\n ]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n\n print(first_name, last_name, mobile)\n\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n\n return Response({\n \"success\": False\n }, status=status.HTTP_200_OK)\n",
"step-ids": [
8,
9,
10,
14,
15
]
}
|
[
8,
9,
10,
14,
15
] |
import pytest
import torch
from homura.utils.containers import Map, TensorTuple
def test_map():
map = Map(a=1, b=2)
map["c"] = 3
for k, v in map.items():
assert map[k] == getattr(map, k)
for k in ["update", "keys", "items", "values", "clear", "copy", "get", "pop"]:
with pytest.raises(KeyError):
setattr(map, k, 1)
def test_tensortuple():
a = torch.randn(3, 3), torch.randn(3, 3)
t = TensorTuple(a)
assert t[0].dtype == torch.float32
assert t.to(torch.int32)[0].dtype == torch.int32
|
normal
|
{
"blob_id": "c70b4ff26abe3d85e41bfc7a32cf6e1ce4c48d07",
"index": 6291,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-3": "<mask token>\n\n\ndef test_map():\n map = Map(a=1, b=2)\n map['c'] = 3\n for k, v in map.items():\n assert map[k] == getattr(map, k)\n for k in ['update', 'keys', 'items', 'values', 'clear', 'copy', 'get',\n 'pop']:\n with pytest.raises(KeyError):\n setattr(map, k, 1)\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-4": "import pytest\nimport torch\nfrom homura.utils.containers import Map, TensorTuple\n\n\ndef test_map():\n map = Map(a=1, b=2)\n map['c'] = 3\n for k, v in map.items():\n assert map[k] == getattr(map, k)\n for k in ['update', 'keys', 'items', 'values', 'clear', 'copy', 'get',\n 'pop']:\n with pytest.raises(KeyError):\n setattr(map, k, 1)\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-5": "import pytest\nimport torch\n\nfrom homura.utils.containers import Map, TensorTuple\n\n\ndef test_map():\n map = Map(a=1, b=2)\n map[\"c\"] = 3\n for k, v in map.items():\n assert map[k] == getattr(map, k)\n\n for k in [\"update\", \"keys\", \"items\", \"values\", \"clear\", \"copy\", \"get\", \"pop\"]:\n with pytest.raises(KeyError):\n setattr(map, k, 1)\n\n\ndef test_tensortuple():\n a = torch.randn(3, 3), torch.randn(3, 3)\n t = TensorTuple(a)\n assert t[0].dtype == torch.float32\n\n assert t.to(torch.int32)[0].dtype == torch.int32\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def change_brightness(image, max_delta):
"""brightness an image"""
img = tf.image.adjust_brightness(image, max_delta)
return img
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import tensorflow as tf
def change_brightness(image, max_delta):
"""brightness an image"""
img = tf.image.adjust_brightness(image, max_delta)
return img
<|reserved_special_token_1|>
#!/usr/bin/env python3
""" brightness an image"""
import tensorflow as tf
def change_brightness(image, max_delta):
"""brightness an image"""
img = tf.image.adjust_brightness(image, max_delta)
return img
|
flexible
|
{
"blob_id": "07e068dbc1ba1bcb85121ee49f2f9337cae188ba",
"index": 9388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef change_brightness(image, max_delta):\n \"\"\"brightness an image\"\"\"\n img = tf.image.adjust_brightness(image, max_delta)\n return img\n",
"step-3": "<mask token>\nimport tensorflow as tf\n\n\ndef change_brightness(image, max_delta):\n \"\"\"brightness an image\"\"\"\n img = tf.image.adjust_brightness(image, max_delta)\n return img\n",
"step-4": "#!/usr/bin/env python3\n\"\"\" brightness an image\"\"\"\nimport tensorflow as tf\n\n\ndef change_brightness(image, max_delta):\n \"\"\"brightness an image\"\"\"\n img = tf.image.adjust_brightness(image, max_delta)\n return img\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(type(funs))
print(type(funs.add))
print('Result: ', funs.add(10, 20))
print('Result: ', fba.add(10, 20))
print(type(fba))
print(a)
print(m.pi)
<|reserved_special_token_0|>
print(p)
print(bps.happy(10, 20))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(type(funs))
print(type(funs.add))
print('Result: ', funs.add(10, 20))
print('Result: ', fba.add(10, 20))
print(type(fba))
print(a)
print(m.pi)
p = User()
print(p)
print(bps.happy(10, 20))
<|reserved_special_token_1|>
import math as m
import functions_by_alexandra as fba
import funs
from functions_by_alexandra import User, a
from pkg import bps, geom
print(type(funs))
print(type(funs.add))
print('Result: ', funs.add(10, 20))
print('Result: ', fba.add(10, 20))
print(type(fba))
print(a)
print(m.pi)
p = User()
print(p)
print(bps.happy(10, 20))
<|reserved_special_token_1|>
import math as m
import functions_by_alexandra as fba
import funs
from functions_by_alexandra import User, a
from pkg import bps, geom
print(type(funs))
print(type(funs.add ))
#
# print(add(2,3))
print("Result: ", funs.add(10, 20))
print("Result: ", fba.add(10,20))
print(type(fba ))
print(a )
print(m.pi)
p = User()
print(p)
#print(functions_by_alexandra.add(10,20))
print(bps.happy(10,20))
|
flexible
|
{
"blob_id": "b53b0e6ff14750bbba3c2e5e2ea2fc5bb1abccec",
"index": 3135,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(type(funs))\nprint(type(funs.add))\nprint('Result: ', funs.add(10, 20))\nprint('Result: ', fba.add(10, 20))\nprint(type(fba))\nprint(a)\nprint(m.pi)\n<mask token>\nprint(p)\nprint(bps.happy(10, 20))\n",
"step-3": "<mask token>\nprint(type(funs))\nprint(type(funs.add))\nprint('Result: ', funs.add(10, 20))\nprint('Result: ', fba.add(10, 20))\nprint(type(fba))\nprint(a)\nprint(m.pi)\np = User()\nprint(p)\nprint(bps.happy(10, 20))\n",
"step-4": "import math as m\nimport functions_by_alexandra as fba\nimport funs\nfrom functions_by_alexandra import User, a\nfrom pkg import bps, geom\nprint(type(funs))\nprint(type(funs.add))\nprint('Result: ', funs.add(10, 20))\nprint('Result: ', fba.add(10, 20))\nprint(type(fba))\nprint(a)\nprint(m.pi)\np = User()\nprint(p)\nprint(bps.happy(10, 20))\n",
"step-5": "import math as m\r\n\r\nimport functions_by_alexandra as fba\r\nimport funs\r\nfrom functions_by_alexandra import User, a\r\nfrom pkg import bps, geom\r\n\r\nprint(type(funs))\r\nprint(type(funs.add ))\r\n#\r\n# print(add(2,3))\r\nprint(\"Result: \", funs.add(10, 20))\r\nprint(\"Result: \", fba.add(10,20))\r\nprint(type(fba ))\r\nprint(a )\r\n\r\nprint(m.pi)\r\n\r\n\r\np = User()\r\nprint(p)\r\n\r\n#print(functions_by_alexandra.add(10,20))\r\n\r\n\r\n\r\nprint(bps.happy(10,20))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# views which respond to ajax requests
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.models import User
from social.models import Like, Post, Comment, Notification
from social.notifications import Notify
from social.forms import CommentForm
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.template import loader
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from social.collections import Collections
from watson import search as watson
c = Collections()
data = {}
# like or unlike posts, kraks, users or comments
def like(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
# get notification data
if item_type == "post":
liked_object = Post.objects.get(id=item_id)
elif item_type == "comment":
liked_object = Comment.objects.get(id=item_id)
target = liked_object.author if item_type != "user" else liked_object
# user must be authenticated to like/unlike
if request.user.is_authenticated:
like = Like.objects.filter(item_id=item_id, item_type=item_type, user=request.user)
if like.exists():
# unlike
like.delete()
# delete notification
try:
Notification.objects.get(
actor_id=request.user.id,
actor_type="user",
verb="like",
object_id=liked_object.id,
object_type=item_type,
target_id=target.id,
target_type="user"
).delete()
except Notification.DoesNotExist:
pass
else:
# like
like = Like.objects.create(item_id=item_id, item_type=item_type, user=request.user)
# create notification
# NB: users should not be notified of their actions on objects they created
if like.user != target:
Notification.objects.create(
actor_id=request.user.id,
actor_type="user",
verb="like",
object_id=liked_object.id,
object_type=item_type,
target_id=target.id,
target_type="user"
)
data['auth'] = True
else: # anonymous user
data['auth'] = False
return JsonResponse(data)
# follow or unfollow users
def follow(request):
action = request.POST.get('action') # follow/unfollow
followed_user_id = request.POST.get('followedUserId')
followed_user = User.objects.get(id=followed_user_id)
# users cannot follow themselves
if followed_user == request.user:
return JsonResponse({})
# user must be authenticated to follow/unfollow
if request.user.is_authenticated():
if action == 'follow':
followed_user.profile.followers.add(request.user)
request.user.profile.following.add(followed_user)
# create notification
Notification.objects.create(
actor_id=request.user.id,
actor_type="user",
verb="follow",
object_id=followed_user.id,
object_type="user",
target_id=followed_user.id,
target_type="user"
)
elif action == 'unfollow':
followed_user.profile.followers.remove(request.user)
request.user.profile.following.remove(followed_user)
try:
Notification.objects.get(
actor_id=request.user.id,
actor_type="user",
verb="follow",
object_id=followed_user.id,
object_type="user",
target_id=followed_user.id,
target_type="user"
).delete()
except Notification.DoesNotExist:
pass
data['auth'] = True
else:
data['auth'] = False
return JsonResponse(data)
def delete(request):
item_id = request.POST.get('itemId')
item_type = request.POST.get('itemType')
if item_type == 'post':
item = Post.objects.get(id=item_id)
messages.success(request, "Post deleted successfully!")
# delete notifications associated with this post
try:
Notification.objects.filter(
object_id=item.id,
object_type="post"
).delete()
except Notification.DoesNotExist:
pass
elif item_type == 'comment':
item = Comment.objects.get(id=item_id)
messages.success(request, "Comment deleted successfully!")
# delete notifications associated with this comment
try:
Notification.objects.get(
object_id=item.id,
object_type="comment"
).delete()
except Notification.DoesNotExist:
pass
if item.author == request.user:
item.delete()
data['error'] = False
return JsonResponse(data)
def comment(request):
if request.user.is_authenticated():
data['auth'] = True;
form = CommentForm(request.POST)
if form.is_valid():
post_id = request.POST.get('post_id')
content = request.POST.get('content')
page = request.POST.get('page')
post = Post.objects.get(id=post_id)
comment = Comment.objects.create(content=content, post=post, author=request.user)
show_comment_actions = True if page == "post" else False
comment_html = loader.render_to_string(
'social/partials/latest-comment.html', {
'comment': comment,
'current_user': request.user,
'show_comment_actions': show_comment_actions
},
)
data['comment_html'] = comment_html
data['errors'] = False
# create notification
if post.author != comment.author:
Notification.objects.create(
actor_id=request.user.id,
actor_type="user",
verb="comment",
object_id=comment.id,
object_type="comment",
target_id=post.author.id,
target_type="user"
)
else:
data['errors'] = form.errors
else:
data['auth'] = False
return JsonResponse(data)
def clear_image(request):
item_id = int(request.POST.get('itemId'))
item_type = request.POST.get('itemType')
if item_type == 'post':
Post.objects.get(id=item_id, author=request.user).featured_image.delete(save=True)
elif item_type == 'user' and item_id == request.user.id:
User.objects.get(id=item_id).profile.profile_photo.delete(save=True)
messages.success(request, 'Image successfully removed!')
return JsonResponse(data)
#### LAZY LOADING ####
######################
# META
def paginate_list(input_list, page, results_per_page=10):
paginator = Paginator(input_list, results_per_page)
# paginate
try:
output_list = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver 2nd page.
output_list = paginator.page(2)
except EmptyPage:
# If page is out of range (e.g. 9999), return empty list
output_list = []
# push to template
return output_list
def load_feeds(request):
page = request.POST.get('page')
posts = c.feed(request.user)
posts = paginate_list(posts, page, 15)
posts_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
return JsonResponse(data)
def load_user_lists(request):
user_list = request.POST.get('userList') # posts, following, followers, liked posts
user_id = request.POST.get('userId')
page = request.POST.get('page')
user = User.objects.get(id=user_id)
if user_list == 'posts':
posts = user.profile.get_posts(request.user)
posts = paginate_list(posts, page)
posts_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': posts, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = posts.has_next()
data['list_html'] = posts_html
elif user_list == 'following':
following = list(reversed(user.profile.following.all()))
following = paginate_list(following, page)
following_html = loader.render_to_string(
'social/partials/users.html',
{'user': request.user, 'users': following, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = following.has_next()
data['list_html'] = following_html
elif user_list == 'followers':
followers = list(reversed(user.profile.followers.all()))
followers = paginate_list(followers, page)
followers_html = loader.render_to_string(
'social/partials/users.html',
{'user': request.user, 'users': followers, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = followers.has_next()
data['list_html'] = followers_html
elif user_list == 'liked':
liked_posts = c.liked(request.user)
liked_posts = paginate_list(liked_posts, page)
liked_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = liked_posts.has_next()
data['list_html'] = liked_html
return JsonResponse(data)
def load_comments(request):
post_id = request.POST.get('postId')
page = request.POST.get('page')
comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')
comments = paginate_list(comments, page)
comments_html = loader.render_to_string(
'social/partials/comments.html',
{'comments': comments, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = comments.has_next()
data['comments_html'] = comments_html
return JsonResponse(data)
def load_popular(request):
page = request.POST.get('page')
popular_posts = c.popular(request.user)
popular_posts = paginate_list(popular_posts, page, 15)
popular_html = loader.render_to_string(
'social/partials/posts.html',
{'posts': popular_posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = popular_posts.has_next()
data['list_html'] = popular_html
return JsonResponse(data)
def load_users(request):
page = request.POST.get('page')
users = c.popular_users(request.user)
users = paginate_list(users, page, 15)
users_html = loader.render_to_string(
'social/partials/users.html',
{'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL},
)
data['has_next'] = users.has_next()
data['list_html'] = users_html
return JsonResponse(data)
def load_search_results(request):
q = request.POST.get('q')
page = request.POST.get('page')
results = watson.search(q)
results = paginate_list(results, page)
results_html = loader.render_to_string(
'social/partials/search-results.html',
{'results': results},
)
data['has_next'] = results.has_next()
data['results_html'] = results_html
return JsonResponse(data)
def load_notifications(request):
page = request.POST.get('page')
notifs = Notification.objects.filter(target_type="user", target_id=request.user.id).order_by('-created_at')
notifs = paginate_list(notifs, page)
notifications = []
for n in notifs:
notif = Notify(n)
notification = notif.get()
notifications.append({'message': notification, 'date': n.created_at})
# mark unread notification as read
if n.is_read == False:
n.is_read = True
n.save()
notifs_html = loader.render_to_string(
'social/partials/notifications.html',
{'notifications': notifications},
)
data['has_next'] = notifs.has_next()
data['notifs_html'] = notifs_html
return JsonResponse(data)
|
normal
|
{
"blob_id": "0b4f070d30642449536118accffa371a89dd3075",
"index": 8857,
"step-1": "<mask token>\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef delete(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n item = Post.objects.get(id=item_id)\n messages.success(request, 'Post deleted successfully!')\n try:\n Notification.objects.filter(object_id=item.id, object_type='post'\n ).delete()\n except Notification.DoesNotExist:\n pass\n elif item_type == 'comment':\n item = Comment.objects.get(id=item_id)\n messages.success(request, 'Comment deleted successfully!')\n try:\n Notification.objects.get(object_id=item.id, object_type='comment'\n ).delete()\n except Notification.DoesNotExist:\n pass\n if item.author == request.user:\n item.delete()\n data['error'] = False\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\ndef load_user_lists(request):\n user_list = request.POST.get('userList')\n user_id = request.POST.get('userId')\n page = request.POST.get('page')\n user = User.objects.get(id=user_id)\n if user_list == 'posts':\n posts = user.profile.get_posts(request.user)\n posts = paginate_list(posts, page)\n posts_html = loader.render_to_string('social/partials/posts.html',\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n elif user_list == 'following':\n following = list(reversed(user.profile.following.all()))\n following = paginate_list(following, page)\n following_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': following, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = following.has_next()\n data['list_html'] = following_html\n elif user_list == 'followers':\n followers = list(reversed(user.profile.followers.all()))\n followers = paginate_list(followers, page)\n followers_html = 
loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': followers, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = followers.has_next()\n data['list_html'] = followers_html\n elif user_list == 'liked':\n liked_posts = c.liked(request.user)\n liked_posts = paginate_list(liked_posts, page)\n liked_html = loader.render_to_string('social/partials/posts.html',\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = liked_posts.has_next()\n data['list_html'] = liked_html\n return JsonResponse(data)\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\ndef load_notifications(request):\n page = request.POST.get('page')\n notifs = Notification.objects.filter(target_type='user', target_id=\n request.user.id).order_by('-created_at')\n notifs = paginate_list(notifs, page)\n notifications = []\n for n in notifs:\n notif = Notify(n)\n notification = notif.get()\n notifications.append({'message': notification, 'date': n.created_at})\n if n.is_read == False:\n n.is_read = True\n n.save()\n notifs_html = loader.render_to_string('social/partials/notifications.html',\n {'notifications': notifications})\n data['has_next'] = notifs.has_next()\n data['notifs_html'] = notifs_html\n return JsonResponse(data)\n",
"step-3": "<mask token>\n\n\ndef like(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n liked_object = Post.objects.get(id=item_id)\n elif item_type == 'comment':\n liked_object = Comment.objects.get(id=item_id)\n target = liked_object.author if item_type != 'user' else liked_object\n if request.user.is_authenticated:\n like = Like.objects.filter(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.exists():\n like.delete()\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n else:\n like = Like.objects.create(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.user != target:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user')\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef delete(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n item = Post.objects.get(id=item_id)\n messages.success(request, 'Post deleted successfully!')\n try:\n Notification.objects.filter(object_id=item.id, object_type='post'\n ).delete()\n except Notification.DoesNotExist:\n pass\n elif item_type == 'comment':\n item = Comment.objects.get(id=item_id)\n messages.success(request, 'Comment deleted successfully!')\n try:\n Notification.objects.get(object_id=item.id, object_type='comment'\n ).delete()\n except Notification.DoesNotExist:\n pass\n if item.author == request.user:\n item.delete()\n data['error'] = False\n return JsonResponse(data)\n\n\ndef comment(request):\n if request.user.is_authenticated():\n data['auth'] = True\n form = CommentForm(request.POST)\n if form.is_valid():\n post_id = request.POST.get('post_id')\n content = request.POST.get('content')\n page = request.POST.get('page')\n post = Post.objects.get(id=post_id)\n comment = Comment.objects.create(content=content, post=post,\n author=request.user)\n show_comment_actions = True if page == 'post' else False\n comment_html = loader.render_to_string(\n 'social/partials/latest-comment.html', {'comment': comment,\n 'current_user': request.user, 'show_comment_actions':\n 
show_comment_actions})\n data['comment_html'] = comment_html\n data['errors'] = False\n if post.author != comment.author:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='comment', object_id=comment.id,\n object_type='comment', target_id=post.author.id,\n target_type='user')\n else:\n data['errors'] = form.errors\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\ndef load_user_lists(request):\n user_list = request.POST.get('userList')\n user_id = request.POST.get('userId')\n page = request.POST.get('page')\n user = User.objects.get(id=user_id)\n if user_list == 'posts':\n posts = user.profile.get_posts(request.user)\n posts = paginate_list(posts, page)\n posts_html = loader.render_to_string('social/partials/posts.html',\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n elif user_list == 'following':\n following = list(reversed(user.profile.following.all()))\n following = paginate_list(following, page)\n following_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': following, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = following.has_next()\n data['list_html'] = following_html\n elif user_list == 'followers':\n followers = list(reversed(user.profile.followers.all()))\n followers = paginate_list(followers, page)\n followers_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': followers, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = followers.has_next()\n data['list_html'] = followers_html\n elif user_list == 'liked':\n liked_posts = c.liked(request.user)\n liked_posts = paginate_list(liked_posts, page)\n liked_html = loader.render_to_string('social/partials/posts.html',\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = liked_posts.has_next()\n data['list_html'] = liked_html\n return JsonResponse(data)\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return 
JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\ndef load_notifications(request):\n page = request.POST.get('page')\n notifs = Notification.objects.filter(target_type='user', target_id=\n request.user.id).order_by('-created_at')\n notifs = paginate_list(notifs, page)\n notifications = []\n for n in notifs:\n notif = Notify(n)\n notification = notif.get()\n notifications.append({'message': notification, 'date': n.created_at})\n if n.is_read == False:\n n.is_read = True\n n.save()\n notifs_html = loader.render_to_string('social/partials/notifications.html',\n {'notifications': notifications})\n data['has_next'] = notifs.has_next()\n data['notifs_html'] = notifs_html\n return JsonResponse(data)\n",
"step-4": "<mask token>\n\n\ndef like(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n liked_object = Post.objects.get(id=item_id)\n elif item_type == 'comment':\n liked_object = Comment.objects.get(id=item_id)\n target = liked_object.author if item_type != 'user' else liked_object\n if request.user.is_authenticated:\n like = Like.objects.filter(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.exists():\n like.delete()\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n else:\n like = Like.objects.create(item_id=item_id, item_type=item_type,\n user=request.user)\n if like.user != target:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='like', object_id=liked_object.\n id, object_type=item_type, target_id=target.id,\n target_type='user')\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef follow(request):\n action = request.POST.get('action')\n followed_user_id = request.POST.get('followedUserId')\n followed_user = User.objects.get(id=followed_user_id)\n if followed_user == request.user:\n return JsonResponse({})\n if request.user.is_authenticated():\n if action == 'follow':\n followed_user.profile.followers.add(request.user)\n request.user.profile.following.add(followed_user)\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=followed_user.\n id, object_type='user', target_id=followed_user.id,\n target_type='user')\n elif action == 'unfollow':\n followed_user.profile.followers.remove(request.user)\n request.user.profile.following.remove(followed_user)\n try:\n Notification.objects.get(actor_id=request.user.id,\n actor_type='user', verb='follow', object_id=\n followed_user.id, object_type='user', target_id=\n followed_user.id, target_type='user').delete()\n except Notification.DoesNotExist:\n pass\n data['auth'] = True\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef delete(request):\n item_id = request.POST.get('itemId')\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n item = Post.objects.get(id=item_id)\n messages.success(request, 'Post deleted successfully!')\n try:\n Notification.objects.filter(object_id=item.id, object_type='post'\n ).delete()\n except Notification.DoesNotExist:\n pass\n elif item_type == 'comment':\n item = Comment.objects.get(id=item_id)\n messages.success(request, 'Comment deleted successfully!')\n try:\n Notification.objects.get(object_id=item.id, object_type='comment'\n ).delete()\n except Notification.DoesNotExist:\n pass\n if item.author == request.user:\n item.delete()\n data['error'] = False\n return JsonResponse(data)\n\n\ndef comment(request):\n if request.user.is_authenticated():\n data['auth'] = True\n form = CommentForm(request.POST)\n if form.is_valid():\n post_id = request.POST.get('post_id')\n content = request.POST.get('content')\n page = request.POST.get('page')\n post = Post.objects.get(id=post_id)\n comment = Comment.objects.create(content=content, post=post,\n author=request.user)\n show_comment_actions = True if page == 'post' else False\n comment_html = loader.render_to_string(\n 'social/partials/latest-comment.html', {'comment': comment,\n 'current_user': request.user, 'show_comment_actions':\n 
show_comment_actions})\n data['comment_html'] = comment_html\n data['errors'] = False\n if post.author != comment.author:\n Notification.objects.create(actor_id=request.user.id,\n actor_type='user', verb='comment', object_id=comment.id,\n object_type='comment', target_id=post.author.id,\n target_type='user')\n else:\n data['errors'] = form.errors\n else:\n data['auth'] = False\n return JsonResponse(data)\n\n\ndef clear_image(request):\n item_id = int(request.POST.get('itemId'))\n item_type = request.POST.get('itemType')\n if item_type == 'post':\n Post.objects.get(id=item_id, author=request.user\n ).featured_image.delete(save=True)\n elif item_type == 'user' and item_id == request.user.id:\n User.objects.get(id=item_id).profile.profile_photo.delete(save=True)\n messages.success(request, 'Image successfully removed!')\n return JsonResponse(data)\n\n\ndef paginate_list(input_list, page, results_per_page=10):\n paginator = Paginator(input_list, results_per_page)\n try:\n output_list = paginator.page(page)\n except PageNotAnInteger:\n output_list = paginator.page(2)\n except EmptyPage:\n output_list = []\n return output_list\n\n\ndef load_feeds(request):\n page = request.POST.get('page')\n posts = c.feed(request.user)\n posts = paginate_list(posts, page, 15)\n posts_html = loader.render_to_string('social/partials/posts.html', {\n 'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n return JsonResponse(data)\n\n\ndef load_user_lists(request):\n user_list = request.POST.get('userList')\n user_id = request.POST.get('userId')\n page = request.POST.get('page')\n user = User.objects.get(id=user_id)\n if user_list == 'posts':\n posts = user.profile.get_posts(request.user)\n posts = paginate_list(posts, page)\n posts_html = loader.render_to_string('social/partials/posts.html',\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = posts.has_next()\n data['list_html'] = posts_html\n elif user_list == 'following':\n following = list(reversed(user.profile.following.all()))\n following = paginate_list(following, page)\n following_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': following, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = following.has_next()\n data['list_html'] = following_html\n elif user_list == 'followers':\n followers = list(reversed(user.profile.followers.all()))\n followers = paginate_list(followers, page)\n followers_html = loader.render_to_string('social/partials/users.html',\n {'user': request.user, 'users': followers, 'MEDIA_URL':\n settings.MEDIA_URL})\n data['has_next'] = followers.has_next()\n data['list_html'] = followers_html\n elif user_list == 'liked':\n liked_posts = c.liked(request.user)\n liked_posts = paginate_list(liked_posts, page)\n liked_html = loader.render_to_string('social/partials/posts.html',\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = liked_posts.has_next()\n data['list_html'] = liked_html\n return JsonResponse(data)\n\n\ndef load_comments(request):\n post_id = request.POST.get('postId')\n page = request.POST.get('page')\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\n comments = paginate_list(comments, page)\n comments_html = loader.render_to_string('social/partials/comments.html',\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.\n MEDIA_URL})\n data['has_next'] = comments.has_next()\n data['comments_html'] = 
comments_html\n return JsonResponse(data)\n\n\n<mask token>\n\n\ndef load_users(request):\n page = request.POST.get('page')\n users = c.popular_users(request.user)\n users = paginate_list(users, page, 15)\n users_html = loader.render_to_string('social/partials/users.html', {\n 'user': request.user, 'users': users, 'MEDIA_URL': settings.MEDIA_URL})\n data['has_next'] = users.has_next()\n data['list_html'] = users_html\n return JsonResponse(data)\n\n\ndef load_search_results(request):\n q = request.POST.get('q')\n page = request.POST.get('page')\n results = watson.search(q)\n results = paginate_list(results, page)\n results_html = loader.render_to_string(\n 'social/partials/search-results.html', {'results': results})\n data['has_next'] = results.has_next()\n data['results_html'] = results_html\n return JsonResponse(data)\n\n\ndef load_notifications(request):\n page = request.POST.get('page')\n notifs = Notification.objects.filter(target_type='user', target_id=\n request.user.id).order_by('-created_at')\n notifs = paginate_list(notifs, page)\n notifications = []\n for n in notifs:\n notif = Notify(n)\n notification = notif.get()\n notifications.append({'message': notification, 'date': n.created_at})\n if n.is_read == False:\n n.is_read = True\n n.save()\n notifs_html = loader.render_to_string('social/partials/notifications.html',\n {'notifications': notifications})\n data['has_next'] = notifs.has_next()\n data['notifs_html'] = notifs_html\n return JsonResponse(data)\n",
"step-5": "# views which respond to ajax requests\r\n\r\nfrom django.contrib import messages\r\nfrom django.conf import settings\r\nfrom django.contrib.auth.models import User\r\nfrom social.models import Like, Post, Comment, Notification\r\nfrom social.notifications import Notify\r\nfrom social.forms import CommentForm\r\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\r\nfrom django.template import loader\r\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\r\nfrom social.collections import Collections\r\nfrom watson import search as watson\r\n\r\nc = Collections()\r\ndata = {}\r\n\r\n# like or unlike posts, kraks, users or comments\r\ndef like(request):\r\n item_id = request.POST.get('itemId')\r\n item_type = request.POST.get('itemType')\r\n\r\n # get notification data\r\n if item_type == \"post\":\r\n liked_object = Post.objects.get(id=item_id)\r\n elif item_type == \"comment\":\r\n liked_object = Comment.objects.get(id=item_id)\r\n target = liked_object.author if item_type != \"user\" else liked_object\r\n\r\n # user must be authenticated to like/unlike\r\n if request.user.is_authenticated:\r\n like = Like.objects.filter(item_id=item_id, item_type=item_type, user=request.user)\r\n if like.exists():\r\n # unlike\r\n like.delete()\r\n # delete notification\r\n try:\r\n Notification.objects.get(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"like\",\r\n object_id=liked_object.id,\r\n object_type=item_type,\r\n target_id=target.id,\r\n target_type=\"user\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n pass\r\n else:\r\n # like\r\n like = Like.objects.create(item_id=item_id, item_type=item_type, user=request.user)\r\n # create notification\r\n # NB: users should not be notified of their actions on objects they created\r\n if like.user != target:\r\n Notification.objects.create(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"like\",\r\n object_id=liked_object.id,\r\n object_type=item_type,\r\n target_id=target.id,\r\n target_type=\"user\"\r\n )\r\n data['auth'] = True\r\n else: # anonymous user\r\n data['auth'] = False\r\n return JsonResponse(data)\r\n\r\n\r\n# follow or unfollow users\r\ndef follow(request):\r\n action = request.POST.get('action') # follow/unfollow\r\n followed_user_id = request.POST.get('followedUserId')\r\n followed_user = User.objects.get(id=followed_user_id)\r\n\r\n # users cannot follow themselves\r\n if followed_user == request.user:\r\n return JsonResponse({})\r\n\r\n # user must be authenticated to follow/unfollow\r\n if request.user.is_authenticated():\r\n if action == 'follow':\r\n followed_user.profile.followers.add(request.user)\r\n request.user.profile.following.add(followed_user)\r\n # create notification\r\n Notification.objects.create(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"follow\",\r\n object_id=followed_user.id,\r\n object_type=\"user\",\r\n target_id=followed_user.id,\r\n target_type=\"user\"\r\n )\r\n elif action == 'unfollow':\r\n followed_user.profile.followers.remove(request.user)\r\n request.user.profile.following.remove(followed_user)\r\n try:\r\n Notification.objects.get(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"follow\",\r\n object_id=followed_user.id,\r\n object_type=\"user\",\r\n target_id=followed_user.id,\r\n target_type=\"user\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n pass\r\n data['auth'] = True\r\n else:\r\n data['auth'] = False\r\n return JsonResponse(data)\r\n\r\n\r\ndef 
delete(request):\r\n item_id = request.POST.get('itemId')\r\n item_type = request.POST.get('itemType')\r\n\r\n if item_type == 'post':\r\n item = Post.objects.get(id=item_id)\r\n messages.success(request, \"Post deleted successfully!\")\r\n # delete notifications associated with this post\r\n try:\r\n Notification.objects.filter(\r\n object_id=item.id,\r\n object_type=\"post\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n pass\r\n elif item_type == 'comment':\r\n item = Comment.objects.get(id=item_id)\r\n messages.success(request, \"Comment deleted successfully!\")\r\n # delete notifications associated with this comment\r\n try:\r\n Notification.objects.get(\r\n object_id=item.id,\r\n object_type=\"comment\"\r\n ).delete()\r\n except Notification.DoesNotExist:\r\n pass\r\n\r\n if item.author == request.user:\r\n item.delete()\r\n data['error'] = False\r\n return JsonResponse(data)\r\n\r\n\r\ndef comment(request):\r\n if request.user.is_authenticated():\r\n data['auth'] = True;\r\n form = CommentForm(request.POST)\r\n if form.is_valid():\r\n post_id = request.POST.get('post_id')\r\n content = request.POST.get('content')\r\n page = request.POST.get('page')\r\n post = Post.objects.get(id=post_id)\r\n comment = Comment.objects.create(content=content, post=post, author=request.user)\r\n show_comment_actions = True if page == \"post\" else False \r\n comment_html = loader.render_to_string(\r\n 'social/partials/latest-comment.html', {\r\n 'comment': comment, \r\n 'current_user': request.user, \r\n 'show_comment_actions': show_comment_actions\r\n },\r\n )\r\n data['comment_html'] = comment_html\r\n data['errors'] = False\r\n # create notification\r\n if post.author != comment.author:\r\n Notification.objects.create(\r\n actor_id=request.user.id,\r\n actor_type=\"user\",\r\n verb=\"comment\",\r\n object_id=comment.id,\r\n object_type=\"comment\",\r\n target_id=post.author.id,\r\n target_type=\"user\"\r\n )\r\n else:\r\n data['errors'] = form.errors\r\n else:\r\n data['auth'] = False\r\n \r\n return JsonResponse(data)\r\n\r\n\r\ndef clear_image(request):\r\n item_id = int(request.POST.get('itemId'))\r\n item_type = request.POST.get('itemType')\r\n\r\n if item_type == 'post':\r\n Post.objects.get(id=item_id, author=request.user).featured_image.delete(save=True)\r\n elif item_type == 'user' and item_id == request.user.id:\r\n User.objects.get(id=item_id).profile.profile_photo.delete(save=True)\r\n\r\n messages.success(request, 'Image successfully removed!')\r\n return JsonResponse(data)\r\n\r\n\r\n#### LAZY LOADING ####\r\n######################\r\n\r\n# META\r\ndef paginate_list(input_list, page, results_per_page=10):\r\n paginator = Paginator(input_list, results_per_page)\r\n # paginate\r\n try:\r\n output_list = paginator.page(page)\r\n except PageNotAnInteger:\r\n # If page is not an integer, deliver 2nd page.\r\n output_list = paginator.page(2)\r\n except EmptyPage:\r\n # If page is out of range (e.g. 
9999), return empty list\r\n output_list = []\r\n # push to template\r\n return output_list\r\n\r\n\r\ndef load_feeds(request):\r\n page = request.POST.get('page')\r\n\r\n posts = c.feed(request.user)\r\n posts = paginate_list(posts, page, 15)\r\n posts_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = posts.has_next()\r\n data['list_html'] = posts_html\r\n\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_user_lists(request):\r\n user_list = request.POST.get('userList') # posts, following, followers, liked posts\r\n user_id = request.POST.get('userId')\r\n page = request.POST.get('page')\r\n user = User.objects.get(id=user_id)\r\n\r\n if user_list == 'posts':\r\n posts = user.profile.get_posts(request.user)\r\n posts = paginate_list(posts, page)\r\n posts_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': posts, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = posts.has_next()\r\n data['list_html'] = posts_html\r\n elif user_list == 'following':\r\n following = list(reversed(user.profile.following.all()))\r\n following = paginate_list(following, page)\r\n following_html = loader.render_to_string(\r\n 'social/partials/users.html',\r\n {'user': request.user, 'users': following, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = following.has_next()\r\n data['list_html'] = following_html\r\n elif user_list == 'followers':\r\n followers = list(reversed(user.profile.followers.all()))\r\n followers = paginate_list(followers, page)\r\n followers_html = loader.render_to_string(\r\n 'social/partials/users.html',\r\n {'user': request.user, 'users': followers, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = followers.has_next()\r\n data['list_html'] = followers_html\r\n elif user_list == 'liked':\r\n liked_posts = c.liked(request.user)\r\n liked_posts = paginate_list(liked_posts, page)\r\n liked_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': liked_posts, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = liked_posts.has_next()\r\n data['list_html'] = liked_html\r\n return JsonResponse(data)\r\n\r\n \r\ndef load_comments(request):\r\n post_id = request.POST.get('postId')\r\n page = request.POST.get('page')\r\n comments = Comment.objects.filter(post__id=post_id).order_by('-created_at')\r\n comments = paginate_list(comments, page)\r\n comments_html = loader.render_to_string(\r\n 'social/partials/comments.html',\r\n {'comments': comments, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = comments.has_next()\r\n data['comments_html'] = comments_html\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_popular(request):\r\n page = request.POST.get('page')\r\n\r\n popular_posts = c.popular(request.user)\r\n popular_posts = paginate_list(popular_posts, page, 15)\r\n popular_html = loader.render_to_string(\r\n 'social/partials/posts.html',\r\n {'posts': popular_posts, 'user': request.user, 'MEDIA_URL': settings.MEDIA_URL},\r\n )\r\n data['has_next'] = popular_posts.has_next()\r\n data['list_html'] = popular_html\r\n\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_users(request):\r\n page = request.POST.get('page')\r\n\r\n users = c.popular_users(request.user)\r\n users = paginate_list(users, page, 15)\r\n users_html = loader.render_to_string(\r\n 'social/partials/users.html',\r\n {'user': request.user, 'users': users, 'MEDIA_URL': 
settings.MEDIA_URL},\r\n )\r\n data['has_next'] = users.has_next()\r\n data['list_html'] = users_html\r\n\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_search_results(request):\r\n q = request.POST.get('q')\r\n page = request.POST.get('page')\r\n results = watson.search(q)\r\n results = paginate_list(results, page)\r\n results_html = loader.render_to_string(\r\n 'social/partials/search-results.html',\r\n {'results': results},\r\n )\r\n data['has_next'] = results.has_next()\r\n data['results_html'] = results_html\r\n return JsonResponse(data)\r\n\r\n\r\ndef load_notifications(request):\r\n page = request.POST.get('page')\r\n notifs = Notification.objects.filter(target_type=\"user\", target_id=request.user.id).order_by('-created_at')\r\n notifs = paginate_list(notifs, page)\r\n notifications = []\r\n for n in notifs:\r\n notif = Notify(n)\r\n notification = notif.get()\r\n notifications.append({'message': notification, 'date': n.created_at})\r\n # mark unread notification as read\r\n if n.is_read == False:\r\n n.is_read = True\r\n n.save()\r\n\r\n notifs_html = loader.render_to_string(\r\n 'social/partials/notifications.html',\r\n {'notifications': notifications},\r\n )\r\n data['has_next'] = notifs.has_next()\r\n data['notifs_html'] = notifs_html\r\n return JsonResponse(data)",
"step-ids": [
6,
9,
11,
12,
16
]
}
|
[
6,
9,
11,
12,
16
] |
#!/usr/bin/env python
###########################################################################
# 1) connect to the MQTT broker
# 2) subscribe to the available data streams
# 3) log to google sheets
# 4) notify on critical events on the telegram channel
###########################################################################
import time
import datetime
import os
import string
import paho.mqtt.client as mqtt
#import requests
#from googleapiclient import discovery
#from oauth2client import client
#from oauth2client import tools
#from oauth2client.file import Storage
import telepot
import json
from influxdb import InfluxDBClient
import sys
DEBUG = False
UTC_OFFSET = 3 # hours of difference between UTC and local (Jerusalem) time
RECORD_INTERVAL = 5*60 # number of seconds between subsequent records in Google Sheets and InfluxDB
NOTIFY_INTERVAL = 1*60 # number of seconds between subsequent notifications on Telegram
HOME_DIR = '/home/pi' #home directory
localTimeOut = 120 # Local MQTT session timeout
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
last_record = {}
last_notify = {}
# get configuration from json
with open( os.path.join(__location__, 'config.json'), 'r') as f:
config = json.load(f)
telegramToken = config['telegramToken']
RPi_HOST = config['RPi_HOST']
SPREADSHEET_ID = config['SPREADSHEET_ID']
API_KEY = config['API_KEY']
SCOPES = 'https://www.googleapis.com/auth/spreadsheets'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Google Sheets API Python Quickstart'
NUM_ENTRIES_CELL = "InputData!E2"
SHEET_ID = 0
localBroker = RPi_HOST # Local MQTT broker
localPort = 1883 # Local MQTT port
#limits
MAX_TEMPERATURE = 30
MIN_TEMPERATURE = 15
CARBON_MONOXIDE_ADC_THRESH = 5000
GAS_ALL_ADC_THRESH = 12000
WARM_UP_THRESH = 300 # number of seconds from start-up, after which the gas sensors are sampled
topicsOfInterest = ["/sensor/Chipa/humidity",
"/sensor/Chipa/temperature",
"/sensor/Chipa/CO",
"/sensor/Chipa/All_Gas",
"/sensor/livingRoom/alarm",
"/sensor/MotionHUE",
"/empty"
]
def getUTC_TIME():
return datetime.datetime.utcnow()
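# helper: publish a single sample value (converted to a string) on the given MQTT topic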
def pushSample(sample, topic):
global client
client.publish(topic, str(sample))
#Generic Init
print ("Initializing...")
def on_connect(client, userdata, flags, rc):
#MQTT configs
print("Connected with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("#")
def notifyTelegram(message):
print("Notifying Telegram: "+message)
bot.sendMessage(504721552, message)
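# rate-limit notifications per topic: return True only if NOTIFY_INTERVAL seconds
# have passed since the last notification for this topic (and record the new time)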
def isNotifyTime(topic):
timer = time.time()
global last_notify
if topic not in last_notify:
last_notify[topic] = 0
result = True #if event happens for first time, notify
else:
result = (timer - last_notify[topic]) > NOTIFY_INTERVAL
if result == True:
last_notify[topic] = timer # update occurance
return result
def limitsExsess(topic, value):
""" Check the value for limits according to topic.
If out of limit, notify over telegram"""
if isNotifyTime(topic):
if "temperature" in topic:
val = float(value)
if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:
notifyTelegram("Temperature out of bounds: "+value+"degC")
return True
if "CO" in topic:
val = float(value)
if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:
notifyTelegram("Carbon Monoxide level above threshold: "+value)
return True
if "All_Gas" in topic:
val = float(value)
if warmedUp and val > GAS_ALL_ADC_THRESH:
notifyTelegram("Poison gas level above threshold: "+value)
return True
if "alarm" in topic:
val = float(value)
if int(val) == 1:
notifyTelegram("ALARM in Living room is On!")
return True
if "MotionHUE" in topic:
val = float(value)
if int(val) == 1:
notifyTelegram("HUE Motion sensor detected movement!")
return True
return False
def on_message(client, userdata, msg):
# The callback for when a PUBLISH message is received from the server.
global service
global last_record
currTime = getUTC_TIME()
topic = msg.topic
print("UTCtime: "+currTime.ctime()+","+msg.topic+" "+str(msg.payload))
if topic not in topicsOfInterest:
print("Topic: ",topic," from ",msg," not in the interest list")
return
if "empty" in topic:
return
timer = time.time()
if topic not in last_record:
last_record[topic] = 0 #to assure first time is updated
value = str(msg.payload)
if limitsExsess(topic, value) or ((timer-last_record[topic]) > RECORD_INTERVAL):
print("Updating records")
update_records(topic, value)
last_record[topic] = timer
return
def on_disconnect(client, userdata,rc=0):
print("DisConnected result code "+str(rc))
client.loop_stop()
def on_log(client, userdata, level, buf):
print("UTC: ", time.ctime(), "log: ", buf)
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
#home_dir = os.path.expanduser('~')
home_dir = (HOME_DIR)
credential_dir = os.path.join(home_dir, '.credentials')
print("Credentials folder: ",credential_dir)
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'sheets.googleapis.com-python-quickstart.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def create_service():
credentials = get_credentials()
service = discovery.build('sheets', 'v4', credentials=credentials)
return service
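# read the running row count stored in NUM_ENTRIES_CELL of the Google Sheet
# (used only by the currently disabled Sheets logging path)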
def number_of_entries(service):
result = service.spreadsheets().values().get(
spreadsheetId=SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()
value = result.get('values', [])
return int(value[0][0])
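# write one measurement point to InfluxDB; the original Google Sheets update
# is kept below as a disabled block for reference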
def update_records(topic, value):
# Update InfluxDB
receiveTime = getUTC_TIME()
json_body = [
{
"measurement": topic,
"time": receiveTime,
"fields": {
"value": float(value)
}
}
]
print("Writing to InfluxDB: ", json_body)
dbclient.write_points(json_body)
return
''' #update Google Sheets
entries = number_of_entries(service)
currTime = getUTC_TIME()
line_num = str(2 + entries)
range = "InputData!A"+line_num+":D"+line_num
# How the input data should be interpreted.
value_input_option = 'USER_ENTERED'
values = [ [ currTime, topic, value ] ]
body = {'values': values}
request = service.spreadsheets().values().update(
spreadsheetId=SPREADSHEET_ID,
range=range,
valueInputOption=value_input_option,
body=body)
response = request.execute()
update_entries(service,entries+1)
return response '''
def update_entries(service,entries):
#Update Google Sheet
range = NUM_ENTRIES_CELL
value_input_option = 'USER_ENTERED'
values = [
[
entries
] ]
body = {'values': values}
request = service.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=range,
valueInputOption=value_input_option, body=body
)
response = request.execute()
return response
if __name__ == "__main__":
global service
connectedGoogle = False
connectedMQTT = False
global dbclient
global warmedUp #indicate WARM UP Threshold passed, and gas filters can be sampled
warmedUp = False #indicate WARM UP Threshold passed, and gas filters can be sampled
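    # connect to the local InfluxDB instance (database 'sensors') used for time-series logging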
dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')
startTime = time.time()
#establish Telegram Bot
bot = telepot.Bot(telegramToken)
bot.getMe()
# while not connectedGoogle:
# try:
# service = create_service()
# connectedGoogle = True
# except:
# print ("failed to connect to google sheets, retrying")
# time.sleep(1)
client = mqtt.Client("monitor")
client.on_connect = on_connect
client.on_message = on_message
client.on_log = on_log
while not connectedMQTT:
try:
client.connect(localBroker, localPort, keepalive = 6000)
connectedMQTT = True
except:
print("Connection to MQTT broker failed")
print("exception: ",sys.exc_info()[0])
time.sleep(1)
client.loop_start()
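    # MQTT callbacks run in the background thread started by loop_start();
    # the main loop only tracks the sensor warm-up window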
while True:
time.sleep(10)
#client.publish("/empty","0")
if not warmedUp:
warmedUp = (time.time() - startTime) > WARM_UP_THRESH
|
normal
|
{
"blob_id": "0295d6ba962d099e76110c7a0e39748e3163e300",
"index": 5541,
"step-1": "<mask token>\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\n<mask token>\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = 
get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open(os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\n<mask token>\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\nprint('Initializing...')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n 
print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n\n\ndef update_entries(service, entries):\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [[entries]]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=\n SPREADSHEET_ID, range=range, valueInputOption=value_input_option,\n body=body)\n response = request.execute()\n return response\n\n\nif __name__ == '__main__':\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp\n warmedUp = False\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n bot = telepot.Bot(telegramToken)\n bot.getMe()\n client = mqtt.Client('monitor')\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive=6000)\n connectedMQTT = True\n except:\n print('Connection to MQTT broker failed')\n print('exception: ', sys.exc_info()[0])\n time.sleep(1)\n client.loop_start()\n while True:\n time.sleep(10)\n if not warmedUp:\n warmedUp = time.time() - startTime > WARM_UP_THRESH\n",
"step-3": "<mask token>\nDEBUG = False\nUTC_OFFSET = 3\nRECORD_INTERVAL = 5 * 60\nNOTIFY_INTERVAL = 1 * 60\nHOME_DIR = '/home/pi'\nlocalTimeOut = 120\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(\n __file__)))\nlast_record = {}\nlast_notify = {}\nwith open(os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\ntelegramToken = config['telegramToken']\nRPi_HOST = config['RPi_HOST']\nSPREADSHEET_ID = config['SPREADSHEET_ID']\nAPI_KEY = config['API_KEY']\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Sheets API Python Quickstart'\nNUM_ENTRIES_CELL = 'InputData!E2'\nSHEET_ID = 0\nlocalBroker = RPi_HOST\nlocalPort = 1883\nMAX_TEMPERATURE = 30\nMIN_TEMPERATURE = 15\nCARBON_MONOXIDE_ADC_THRESH = 5000\nGAS_ALL_ADC_THRESH = 12000\nWARM_UP_THRESH = 300\ntopicsOfInterest = ['/sensor/Chipa/humidity', '/sensor/Chipa/temperature',\n '/sensor/Chipa/CO', '/sensor/Chipa/All_Gas', '/sensor/livingRoom/alarm',\n '/sensor/MotionHUE', '/empty']\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\nprint('Initializing...')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n 
print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n\n\ndef update_entries(service, entries):\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [[entries]]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=\n SPREADSHEET_ID, range=range, valueInputOption=value_input_option,\n body=body)\n response = request.execute()\n return response\n\n\nif __name__ == '__main__':\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp\n warmedUp = False\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n bot = telepot.Bot(telegramToken)\n bot.getMe()\n client = mqtt.Client('monitor')\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive=6000)\n connectedMQTT = True\n except:\n print('Connection to MQTT broker failed')\n print('exception: ', sys.exc_info()[0])\n time.sleep(1)\n client.loop_start()\n while True:\n time.sleep(10)\n if not warmedUp:\n warmedUp = time.time() - startTime > WARM_UP_THRESH\n",
"step-4": "import time\nimport datetime\nimport os\nimport string\nimport paho.mqtt.client as mqtt\nimport telepot\nimport json\nfrom influxdb import InfluxDBClient\nimport sys\nDEBUG = False\nUTC_OFFSET = 3\nRECORD_INTERVAL = 5 * 60\nNOTIFY_INTERVAL = 1 * 60\nHOME_DIR = '/home/pi'\nlocalTimeOut = 120\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(\n __file__)))\nlast_record = {}\nlast_notify = {}\nwith open(os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\ntelegramToken = config['telegramToken']\nRPi_HOST = config['RPi_HOST']\nSPREADSHEET_ID = config['SPREADSHEET_ID']\nAPI_KEY = config['API_KEY']\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Sheets API Python Quickstart'\nNUM_ENTRIES_CELL = 'InputData!E2'\nSHEET_ID = 0\nlocalBroker = RPi_HOST\nlocalPort = 1883\nMAX_TEMPERATURE = 30\nMIN_TEMPERATURE = 15\nCARBON_MONOXIDE_ADC_THRESH = 5000\nGAS_ALL_ADC_THRESH = 12000\nWARM_UP_THRESH = 300\ntopicsOfInterest = ['/sensor/Chipa/humidity', '/sensor/Chipa/temperature',\n '/sensor/Chipa/CO', '/sensor/Chipa/All_Gas', '/sensor/livingRoom/alarm',\n '/sensor/MotionHUE', '/empty']\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\nprint('Initializing...')\n\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected with result code ' + str(rc))\n client.subscribe('#')\n\n\ndef notifyTelegram(message):\n print('Notifying Telegram: ' + message)\n bot.sendMessage(504721552, message)\n\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True\n else:\n result = timer - last_notify[topic] > NOTIFY_INTERVAL\n if result == True:\n last_notify[topic] = timer\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n if isNotifyTime(topic):\n if 'temperature' in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram('Temperature out of bounds: ' + value + 'degC')\n return True\n if 'CO' in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n notifyTelegram('Carbon Monoxide level above threshold: ' +\n value)\n return True\n if 'All_Gas' in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram('Poison gas level above threshold: ' + value)\n return True\n if 'alarm' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('ALARM in Living room is On!')\n return True\n if 'MotionHUE' in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram('HUE Motion sensor detected movement!')\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print('UTCtime: ' + currTime.ctime() + ',' + msg.topic + ' ' + str(msg.\n payload))\n if topic not in topicsOfInterest:\n print('Topic: ', topic, ' from ', msg, ' not in the interest list')\n return\n if 'empty' in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0\n value = str(msg.payload)\n if limitsExsess(topic, value) or timer - last_record[topic\n ] > RECORD_INTERVAL:\n print('Updating records')\n update_records(topic, value)\n last_record[topic] = timer\n return\n\n\ndef 
on_disconnect(client, userdata, rc=0):\n print('DisConnected result code ' + str(rc))\n client.loop_stop()\n\n\ndef on_log(client, userdata, level, buf):\n print('UTC: ', time.ctime(), 'log: ', buf)\n\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n home_dir = HOME_DIR\n credential_dir = os.path.join(home_dir, '.credentials')\n print('Credentials folder: ', credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else:\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(spreadsheetId=\n SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n receiveTime = getUTC_TIME()\n json_body = [{'measurement': topic, 'time': receiveTime, 'fields': {\n 'value': float(value)}}]\n print('Writing to InfluxDB: ', json_body)\n dbclient.write_points(json_body)\n return\n\n\n<mask token>\n\n\ndef update_entries(service, entries):\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [[entries]]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=\n SPREADSHEET_ID, range=range, valueInputOption=value_input_option,\n body=body)\n response = request.execute()\n return response\n\n\nif __name__ == '__main__':\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp\n warmedUp = False\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n bot = telepot.Bot(telegramToken)\n bot.getMe()\n client = mqtt.Client('monitor')\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive=6000)\n connectedMQTT = True\n except:\n print('Connection to MQTT broker failed')\n print('exception: ', sys.exc_info()[0])\n time.sleep(1)\n client.loop_start()\n while True:\n time.sleep(10)\n if not warmedUp:\n warmedUp = time.time() - startTime > WARM_UP_THRESH\n",
"step-5": "#!/usr/bin/env python\n ###########################################################################\n# 1) connect to the MQTT broker\n# 2) subscribe to the available data streams\n# 3) log to google sheets\n# 4) notify on critical events on the telegram channel\n###########################################################################\n\nimport time\nimport datetime\nimport os\nimport string\nimport paho.mqtt.client as mqtt\n#import requests\n#from googleapiclient import discovery\n#from oauth2client import client\n#from oauth2client import tools\n#from oauth2client.file import Storage\nimport telepot\nimport json\nfrom influxdb import InfluxDBClient\nimport sys\n\nDEBUG = False\nUTC_OFFSET = 3 # hours of differenc between UTC and local (Jerusalem) time\nRECORD_INTERVAL = 5*60 #number if seconds between subsequent recods in google sheets and InfluxDB\nNOTIFY_INTERVAL = 1*60 #number if seconds between subsequent notification on telegram\nHOME_DIR = '/home/pi' #home directory\nlocalTimeOut = 120\t\t\t# Local MQTT session timeout\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nlast_record = {}\nlast_notify = {}\n\n\n# get configuration from json\nwith open( os.path.join(__location__, 'config.json'), 'r') as f:\n config = json.load(f)\n\ntelegramToken = config['telegramToken']\nRPi_HOST = config['RPi_HOST']\nSPREADSHEET_ID = config['SPREADSHEET_ID']\nAPI_KEY = config['API_KEY']\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nCLIENT_SECRET_FILE = 'client_secret.json'\nAPPLICATION_NAME = 'Google Sheets API Python Quickstart'\nNUM_ENTRIES_CELL = \"InputData!E2\"\nSHEET_ID = 0\nlocalBroker = RPi_HOST\t\t# Local MQTT broker\nlocalPort = 1883\t\t\t# Local MQTT port\n\n#limits\nMAX_TEMPERATURE = 30\nMIN_TEMPERATURE = 15\nCARBON_MONOXIDE_ADC_THRESH = 5000\nGAS_ALL_ADC_THRESH = 12000\n\nWARM_UP_THRESH = 300 # number of seconds from start up, after which start up sensors are sample\n\ntopicsOfInterest = [\"/sensor/Chipa/humidity\",\n \"/sensor/Chipa/temperature\",\n \"/sensor/Chipa/CO\",\n \"/sensor/Chipa/All_Gas\",\n \"/sensor/livingRoom/alarm\",\n \"/sensor/MotionHUE\",\n \"/empty\"\n ]\n\n\ndef getUTC_TIME():\n return datetime.datetime.utcnow()\n\n\ndef pushSample(sample, topic):\n global client\n client.publish(topic, str(sample))\n\n\n#Generic Init\nprint (\"Initializing...\")\n\n\ndef on_connect(client, userdata, flags, rc):\n #MQTT configs\n print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"#\")\n\n\ndef notifyTelegram(message):\n print(\"Notifying Telegram: \"+message)\n bot.sendMessage(504721552, message)\n\ndef isNotifyTime(topic):\n timer = time.time()\n global last_notify\n if topic not in last_notify:\n last_notify[topic] = 0\n result = True #if event happens for first time, notify\n else:\n result = (timer - last_notify[topic]) > NOTIFY_INTERVAL\n if result == True: \n last_notify[topic] = timer # update occurance\n return result\n\n\ndef limitsExsess(topic, value):\n \"\"\" Check the value for limits according to topic.\n If out of limit, notify over telegram\"\"\"\n\n if isNotifyTime(topic):\n if \"temperature\" in topic:\n val = float(value)\n if val < MIN_TEMPERATURE or val > MAX_TEMPERATURE:\n notifyTelegram(\"Temperature out of bounds: \"+value+\"degC\")\n return True\n if \"CO\" in topic:\n val = float(value)\n if warmedUp and val > CARBON_MONOXIDE_ADC_THRESH:\n 
notifyTelegram(\"Carbon Monoxide level above threshold: \"+value)\n return True\n if \"All_Gas\" in topic:\n val = float(value)\n if warmedUp and val > GAS_ALL_ADC_THRESH:\n notifyTelegram(\"Poison gas level above threshold: \"+value)\n return True\n if \"alarm\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"ALARM in Living room is On!\")\n return True\n if \"MotionHUE\" in topic:\n val = float(value)\n if int(val) == 1:\n notifyTelegram(\"HUE Motion sensor detected movement!\")\n return True\n return False\n\n\ndef on_message(client, userdata, msg):\n # The callback for when a PUBLISH message is received from the server.\n global service\n global last_record\n currTime = getUTC_TIME()\n topic = msg.topic\n print(\"UTCtime: \"+currTime.ctime()+\",\"+msg.topic+\" \"+str(msg.payload))\n if topic not in topicsOfInterest:\n print(\"Topic: \",topic,\" from \",msg,\" not in the interest list\")\n return\n if \"empty\" in topic:\n return\n timer = time.time()\n if topic not in last_record:\n last_record[topic] = 0 #to assure first time is updated\n value = str(msg.payload)\n if limitsExsess(topic, value) or ((timer-last_record[topic]) > RECORD_INTERVAL):\n print(\"Updating records\")\n update_records(topic, value)\n last_record[topic] = timer \n return\n\n\ndef on_disconnect(client, userdata,rc=0):\n print(\"DisConnected result code \"+str(rc))\n client.loop_stop()\n\ndef on_log(client, userdata, level, buf):\n print(\"UTC: \", time.ctime(), \"log: \", buf)\n\ndef get_credentials():\n \"\"\"Gets valid user credentials from storage.\n\n If nothing has been stored, or if the stored credentials are invalid,\n the OAuth2 flow is completed to obtain the new credentials.\n\n Returns:\n Credentials, the obtained credential.\n \"\"\"\n #home_dir = os.path.expanduser('~')\n home_dir = (HOME_DIR)\n credential_dir = os.path.join(home_dir, '.credentials')\n print(\"Credentials folder: \",credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sheets.googleapis.com-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials\n\n\ndef create_service():\n credentials = get_credentials()\n service = discovery.build('sheets', 'v4', credentials=credentials)\n return service\n\n\ndef number_of_entries(service):\n result = service.spreadsheets().values().get(\n spreadsheetId=SPREADSHEET_ID, range=NUM_ENTRIES_CELL).execute()\n value = result.get('values', [])\n return int(value[0][0])\n\n\ndef update_records(topic, value):\n\n # Update InfluxDB\n receiveTime = getUTC_TIME()\n json_body = [\n {\n \"measurement\": topic,\n \"time\": receiveTime,\n \"fields\": {\n \"value\": float(value)\n }\n }\n ]\n print(\"Writing to InfluxDB: \", json_body)\n dbclient.write_points(json_body)\n return\n\n''' #update Google Sheets\n entries = number_of_entries(service)\n currTime = getUTC_TIME()\n line_num = str(2 + entries)\n range = \"InputData!A\"+line_num+\":D\"+line_num\n\n # How the input data should be interpreted.\n value_input_option = 'USER_ENTERED'\n\n values = [ [ currTime, topic, value ] ]\n body = {'values': 
values}\n\n request = service.spreadsheets().values().update(\n spreadsheetId=SPREADSHEET_ID, \n range=range, \n valueInputOption=value_input_option, \n body=body)\n\n response = request.execute()\n update_entries(service,entries+1)\n\n return response '''\n\n\ndef update_entries(service,entries):\n #Update Google Sheet\n range = NUM_ENTRIES_CELL\n value_input_option = 'USER_ENTERED'\n values = [\n [\n entries\n ] ]\n body = {'values': values}\n request = service.spreadsheets().values().update(spreadsheetId=SPREADSHEET_ID, range=range,\n valueInputOption=value_input_option, body=body\n )\n response = request.execute()\n\n return response\n\nif __name__ == \"__main__\":\n global service\n connectedGoogle = False\n connectedMQTT = False\n global dbclient\n global warmedUp #indicate WARM UP Threshold passed, and gas filters can be sampled\n warmedUp = False #indicate WARM UP Threshold passed, and gas filters can be sampled\n dbclient = InfluxDBClient(RPi_HOST, 8086, 'leo', '333', 'sensors')\n startTime = time.time()\n\n #establish Telegram Bot\n bot = telepot.Bot(telegramToken)\n bot.getMe()\n\n # while not connectedGoogle:\n # try:\n # service = create_service()\n # connectedGoogle = True\n # except:\n # print (\"failed to connect to google sheets, retrying\")\n # time.sleep(1)\n\n client = mqtt.Client(\"monitor\")\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_log = on_log\n\n\n while not connectedMQTT:\n try:\n client.connect(localBroker, localPort, keepalive = 6000)\n connectedMQTT = True\n except:\n print(\"Connection to MQTT broker failed\")\n print(\"exception: \",sys.exc_info()[0])\n time.sleep(1)\n \n client.loop_start()\n while True:\n time.sleep(10)\n #client.publish(\"/empty\",\"0\")\n if not warmedUp:\n warmedUp = (time.time() - startTime) > WARM_UP_THRESH\n",
"step-ids": [
13,
15,
16,
17,
18
]
}
|
[
13,
15,
16,
17,
18
] |
from bacalhau.tei_document import TEIDocument
import nltk
import unittest
class TestDocument(unittest.TestCase):
def setUp(self):
self.filepath = 'tests/corpus/a.xml'
self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.
WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),
'//tei:body/tei:div[@type = "dummy"]')
def test_get_text_count(self):
self.assertEqual(2, self.doc.get_text_count())
def test_get_texts(self):
texts = self.doc.get_texts()
self.assertEqual(2, len(texts))
def test_get_term_data(self):
term_data = self.doc.get_term_data()
self.assertIsNotNone(term_data)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "f86d01c4b980ac44dcdb1b0008493e1dbda25971",
"index": 4544,
"step-1": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "from bacalhau.tei_document import TEIDocument\nimport nltk\nimport unittest\n\n\nclass TestDocument(unittest.TestCase):\n\n def setUp(self):\n self.filepath = 'tests/corpus/a.xml'\n self.doc = TEIDocument(self.filepath, nltk.tokenize.regexp.\n WordPunctTokenizer(), nltk.corpus.stopwords.words('english'),\n '//tei:body/tei:div[@type = \"dummy\"]')\n\n def test_get_text_count(self):\n self.assertEqual(2, self.doc.get_text_count())\n\n def test_get_texts(self):\n texts = self.doc.get_texts()\n self.assertEqual(2, len(texts))\n\n def test_get_term_data(self):\n term_data = self.doc.get_term_data()\n self.assertIsNotNone(term_data)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
3,
5,
6,
7
]
}
|
[
3,
5,
6,
7
] |
# -*- coding: utf-8 -*-
from handlers.base import Base
class Home(Base):
def start(self):
from movuca import DataBase, User
from datamodel.article import Article, ContentType, Category
from datamodel.ads import Ads
self.db = DataBase([User, ContentType, Category, Article, Ads])
def pre_render(self):
# obrigatorio ter um config, um self.response|request, que tenha um render self.response.render
self.response = self.db.response
self.request = self.db.request
self.config = self.db.config
#self.view = "app/home.html"
self.response.meta.title = self.db.config.meta.title
self.response.meta.description = self.db.config.meta.description
self.response.meta.keywords = self.db.config.meta.keywords
self.context.use_facebook = self.db.config.auth.use_facebook
def last_articles(self):
from helpers.article import latest_articles
self.context.latest_articles = latest_articles(self.db)
def ads(self):
self.context.ads = self.db(self.db.Ads.place == "top_slider").select(limitby=(0, 5), orderby="<random>")
if not self.context.ads:
from gluon.storage import Storage
self.context.ads = [Storage(id=1, thumbnail='', link=self.db.CURL('contact', 'ads')),
Storage(id=2, thumbnail="http://placehold.it/250x220&text=%s" % self.db.T("Your add here!"), link=self.db.CURL('contact', 'ads')),
Storage(id=3, thumbnail="http://placekitten.com/250/220", link=self.db.CURL('contact', 'ads')),
Storage(id=3, thumbnail="http://placehold.it/250x220&text=%s" % self.db.T("Your Logo"), link=self.db.CURL('contact', 'ads'))
]
def featured(self):
self.context.featured = self.db(self.db.Article.featured == True).select(limitby=(0, 4), orderby="<random>")
if not self.context.featured:
self.context.featured = self.db(self.db.Article).select(limitby=(0, 4), orderby=~self.db.Article.likes)
|
normal
|
{
"blob_id": "9d0d4707cc9a654752dd0b98fe0fec6a0c1419a1",
"index": 3029,
"step-1": "<mask token>\n\n\nclass Home(Base):\n <mask token>\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n <mask token>\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-2": "<mask token>\n\n\nclass Home(Base):\n\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n <mask token>\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-3": "<mask token>\n\n\nclass Home(Base):\n\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n\n def last_articles(self):\n from helpers.article import latest_articles\n self.context.latest_articles = latest_articles(self.db)\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-4": "from handlers.base import Base\n\n\nclass Home(Base):\n\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n\n def last_articles(self):\n from helpers.article import latest_articles\n self.context.latest_articles = latest_articles(self.db)\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == 'top_slider').select(\n limitby=(0, 5), orderby='<random>')\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.\n CURL('contact', 'ads')), Storage(id=2, thumbnail=\n 'http://placehold.it/250x220&text=%s' % self.db.T(\n 'Your add here!'), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail='http://placekitten.com/250/220',\n link=self.db.CURL('contact', 'ads')), Storage(id=3,\n thumbnail='http://placehold.it/250x220&text=%s' % self.db.T\n ('Your Logo'), link=self.db.CURL('contact', 'ads'))]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True\n ).select(limitby=(0, 4), orderby='<random>')\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby\n =(0, 4), orderby=~self.db.Article.likes)\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom handlers.base import Base\n\n\nclass Home(Base):\n def start(self):\n from movuca import DataBase, User\n from datamodel.article import Article, ContentType, Category\n from datamodel.ads import Ads\n self.db = DataBase([User, ContentType, Category, Article, Ads])\n\n def pre_render(self):\n # obrigatorio ter um config, um self.response|request, que tenha um render self.response.render\n self.response = self.db.response\n self.request = self.db.request\n self.config = self.db.config\n #self.view = \"app/home.html\"\n self.response.meta.title = self.db.config.meta.title\n self.response.meta.description = self.db.config.meta.description\n self.response.meta.keywords = self.db.config.meta.keywords\n self.context.use_facebook = self.db.config.auth.use_facebook\n\n def last_articles(self):\n from helpers.article import latest_articles\n self.context.latest_articles = latest_articles(self.db)\n\n def ads(self):\n self.context.ads = self.db(self.db.Ads.place == \"top_slider\").select(limitby=(0, 5), orderby=\"<random>\")\n if not self.context.ads:\n from gluon.storage import Storage\n self.context.ads = [Storage(id=1, thumbnail='', link=self.db.CURL('contact', 'ads')),\n Storage(id=2, thumbnail=\"http://placehold.it/250x220&text=%s\" % self.db.T(\"Your add here!\"), link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail=\"http://placekitten.com/250/220\", link=self.db.CURL('contact', 'ads')),\n Storage(id=3, thumbnail=\"http://placehold.it/250x220&text=%s\" % self.db.T(\"Your Logo\"), link=self.db.CURL('contact', 'ads'))\n ]\n\n def featured(self):\n self.context.featured = self.db(self.db.Article.featured == True).select(limitby=(0, 4), orderby=\"<random>\")\n if not self.context.featured:\n self.context.featured = self.db(self.db.Article).select(limitby=(0, 4), orderby=~self.db.Article.likes)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module implements helpers for GN SDK e2e tests.
"""
# Note, this is run on bots, which only support python2.7.
# Be sure to only use python2.7 features in this module.
import os
import signal
import sys
import subprocess
from subprocess import Popen, PIPE
class popen:
"""Runs subprocess.Popen and returns the process object.
This is meant to be used as a context manager. For example:
with popen(['echo', 'hello']) as p:
# Use p here
This object ensures that any child processes spawned by the command
are killed by forcing the subprocess to use a process group. This
prevents e.g. the emulator from sticking around as a zombie process
after the test is complete.
Args:
command -- The list of command line arguments.
"""
def __init__(self, command):
self._command = command
self._process = None
def __enter__(self):
self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,
close_fds=True, preexec_fn=os.setsid)
return self._process
def __exit__(self, type, value, traceback):
if self._process.poll() is None:
os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
|
normal
|
{
"blob_id": "bbb3d27ce8f4c1943ecc7ab542346c9f41cbd30e",
"index": 1256,
"step-1": "<mask token>\n\n\nclass popen:\n <mask token>\n\n def __init__(self, command):\n self._command = command\n self._process = None\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass popen:\n <mask token>\n\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n",
"step-3": "<mask token>\n\n\nclass popen:\n \"\"\"Runs subprocess.Popen and returns the process object.\n\n This is meant to be used as a context manager. For example:\n\n with popen(['echo', 'hello']) as p:\n # Use p here\n\n This object ensures that any child processes spawned by the command\n are killed by forcing the subprocess to use a process group. This\n prevents e.g. the emulator from sticking around as a zombie process\n after the test is complete.\n\n Args:\n command -- The list of command line arguments.\n \"\"\"\n\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n",
"step-4": "<mask token>\nimport os\nimport signal\nimport sys\nimport subprocess\nfrom subprocess import Popen, PIPE\n\n\nclass popen:\n \"\"\"Runs subprocess.Popen and returns the process object.\n\n This is meant to be used as a context manager. For example:\n\n with popen(['echo', 'hello']) as p:\n # Use p here\n\n This object ensures that any child processes spawned by the command\n are killed by forcing the subprocess to use a process group. This\n prevents e.g. the emulator from sticking around as a zombie process\n after the test is complete.\n\n Args:\n command -- The list of command line arguments.\n \"\"\"\n\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n",
"step-5": "# Copyright 2020 The Fuchsia Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\"\"\"This module implements helpers for GN SDK e2e tests.\n\"\"\"\n\n# Note, this is run on bots, which only support python2.7.\n# Be sure to only use python2.7 features in this module.\n\nimport os\nimport signal\nimport sys\nimport subprocess\nfrom subprocess import Popen, PIPE\n\nclass popen:\n \"\"\"Runs subprocess.Popen and returns the process object.\n\n This is meant to be used as a context manager. For example:\n\n with popen(['echo', 'hello']) as p:\n # Use p here\n\n This object ensures that any child processes spawned by the command\n are killed by forcing the subprocess to use a process group. This\n prevents e.g. the emulator from sticking around as a zombie process\n after the test is complete.\n\n Args:\n command -- The list of command line arguments.\n \"\"\"\n def __init__(self, command):\n self._command = command\n self._process = None\n\n def __enter__(self):\n self._process = Popen(self._command, stdout=PIPE, stderr=PIPE,\n close_fds=True, preexec_fn=os.setsid)\n return self._process\n\n def __exit__(self, type, value, traceback):\n if self._process.poll() is None:\n os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)\n\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from django.contrib.auth import logout
from rest_framework import status
from rest_framework.authtoken.models import Token
from .serilizer import UserSerializer
class RegistrationView(APIView):
serializer_class = UserSerializer
def post(self,request):
serilizer = UserSerializer(data= request.data)
if serilizer.is_valid():
account = serilizer.save()
user_name = serilizer.validated_data['user_name']
data = { 'response': "user with username " + str(user_name) + ' created'}
data['key'] = get_object_or_404(Token,user = account).key
return Response( data ,status = status.HTTP_201_CREATED )
else :
return Response(serilizer.errors,status = status.HTTP_400_BAD_REQUEST)
class LogoutView(APIView):
def get(self,request):
logout(request)
return Response({"response" : "logged out"},status=status.HTTP_200_OK)
|
normal
|
{
"blob_id": "6a5a6bdb0740d51426aa8b36dd3cc317103412b1",
"index": 641,
"step-1": "<mask token>\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-2": "<mask token>\n\n\nclass RegistrationView(APIView):\n <mask token>\n\n def post(self, request):\n serilizer = UserSerializer(data=request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = {'response': 'user with username ' + str(user_name) +\n ' created'}\n data['key'] = get_object_or_404(Token, user=account).key\n return Response(data, status=status.HTTP_201_CREATED)\n else:\n return Response(serilizer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-3": "<mask token>\n\n\nclass RegistrationView(APIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serilizer = UserSerializer(data=request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = {'response': 'user with username ' + str(user_name) +\n ' created'}\n data['key'] = get_object_or_404(Token, user=account).key\n return Response(data, status=status.HTTP_201_CREATED)\n else:\n return Response(serilizer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-4": "from rest_framework.views import APIView\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom django.contrib.auth import logout\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom .serilizer import UserSerializer\n\n\nclass RegistrationView(APIView):\n serializer_class = UserSerializer\n\n def post(self, request):\n serilizer = UserSerializer(data=request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = {'response': 'user with username ' + str(user_name) +\n ' created'}\n data['key'] = get_object_or_404(Token, user=account).key\n return Response(data, status=status.HTTP_201_CREATED)\n else:\n return Response(serilizer.errors, status=status.\n HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n\n def get(self, request):\n logout(request)\n return Response({'response': 'logged out'}, status=status.HTTP_200_OK)\n",
"step-5": "from rest_framework.views import APIView\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.response import Response\nfrom django.contrib.auth import logout\nfrom rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom .serilizer import UserSerializer\n\nclass RegistrationView(APIView):\n serializer_class = UserSerializer\n\n def post(self,request):\n serilizer = UserSerializer(data= request.data)\n if serilizer.is_valid():\n account = serilizer.save()\n user_name = serilizer.validated_data['user_name']\n data = { 'response': \"user with username \" + str(user_name) + ' created'}\n data['key'] = get_object_or_404(Token,user = account).key\n return Response( data ,status = status.HTTP_201_CREATED )\n else :\n return Response(serilizer.errors,status = status.HTTP_400_BAD_REQUEST)\n\n\nclass LogoutView(APIView):\n def get(self,request):\n logout(request)\n return Response({\"response\" : \"logged out\"},status=status.HTTP_200_OK)",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
from flask import request, Flask
import lock, shelve
app = Flask(__name__)
@app.route("/unlock")
def web_unlock():
if not (request.args.get("token") and request.args.get("state")):
return "Error"
else:
with shelve.open("Settings.conf") as settings:
if "token" in settings:
token = settings["token"]
else:
return "System not setup !"
if request.args.get("token") != token:
return "Invalid Token"
if request.args.get("state") == "open":
lock.unlock()
elif request.args.get("state") == "close":
lock.lock()
elif request.args.get("state") == "switch":
lock.switch()
else:
return "Invalid State"
return "Done"
@app.route("/state")
def web_state():
return str(lock.state())
if __name__ == "__main__":
app.run(debug=True, port=5000, host="0.0.0.0")
|
normal
|
{
"blob_id": "ee0f90b84df73ae5783ca0b8a52fe6fe9c979f15",
"index": 2576,
"step-1": "<mask token>\n\n\[email protected]('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\[email protected]('/state')\ndef web_state():\n return str(lock.state())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\[email protected]('/state')\ndef web_state():\n return str(lock.state())\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\[email protected]('/state')\ndef web_state():\n return str(lock.state())\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')\n",
"step-4": "from flask import request, Flask\nimport lock, shelve\napp = Flask(__name__)\n\n\[email protected]('/unlock')\ndef web_unlock():\n if not (request.args.get('token') and request.args.get('state')):\n return 'Error'\n else:\n with shelve.open('Settings.conf') as settings:\n if 'token' in settings:\n token = settings['token']\n else:\n return 'System not setup !'\n if request.args.get('token') != token:\n return 'Invalid Token'\n if request.args.get('state') == 'open':\n lock.unlock()\n elif request.args.get('state') == 'close':\n lock.lock()\n elif request.args.get('state') == 'switch':\n lock.switch()\n else:\n return 'Invalid State'\n return 'Done'\n\n\[email protected]('/state')\ndef web_state():\n return str(lock.state())\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000, host='0.0.0.0')\n",
"step-5": "from flask import request, Flask\nimport lock, shelve\n\napp = Flask(__name__)\[email protected](\"/unlock\")\ndef web_unlock():\n if not (request.args.get(\"token\") and request.args.get(\"state\")):\n return \"Error\"\n else:\n with shelve.open(\"Settings.conf\") as settings:\n if \"token\" in settings:\n token = settings[\"token\"]\n else:\n return \"System not setup !\"\n if request.args.get(\"token\") != token:\n return \"Invalid Token\"\n if request.args.get(\"state\") == \"open\":\n lock.unlock()\n elif request.args.get(\"state\") == \"close\":\n lock.lock()\n elif request.args.get(\"state\") == \"switch\":\n lock.switch()\n else:\n return \"Invalid State\"\n return \"Done\"\n\[email protected](\"/state\")\ndef web_state():\n return str(lock.state())\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=5000, host=\"0.0.0.0\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Generated by Django 3.0 on 2020-05-04 16:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('game_skeleton', '0001_initial'),
('contenttypes', '0002_remove_content_type_name'),
('class_room', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserHero',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime_created', models.DateTimeField(auto_now=True)),
('datetime_edited', models.DateTimeField(auto_now_add=True)),
('datetime_finished', models.DateTimeField(blank=True, null=True)),
('capacity', models.FloatField()),
('wallet', models.DecimalField(decimal_places=4, default=0.0, max_digits=10)),
('hero_class', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='game_skeleton.HeroClass')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heroes', to='class_room.User')),
],
),
migrations.CreateModel(
name='EventHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('is_draft', models.BooleanField(default=False, help_text='Draft note does not participate in hero capacity calculation.')),
('datetime_created', models.DateTimeField(auto_now=True)),
('datetime_edited', models.DateTimeField(auto_now_add=True)),
('author', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='actions', to='class_room.User')),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='class_room.User')),
],
options={
'verbose_name_plural': 'User`s history events',
},
),
]
|
normal
|
{
"blob_id": "a718d82713503c4ce3d94225ff0db04991ad4094",
"index": 9744,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('game_skeleton', '0001_initial'), ('contenttypes',\n '0002_remove_content_type_name'), ('class_room', '0001_initial')]\n operations = [migrations.CreateModel(name='UserHero', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('datetime_created', models.\n DateTimeField(auto_now=True)), ('datetime_edited', models.\n DateTimeField(auto_now_add=True)), ('datetime_finished', models.\n DateTimeField(blank=True, null=True)), ('capacity', models.\n FloatField()), ('wallet', models.DecimalField(decimal_places=4,\n default=0.0, max_digits=10)), ('hero_class', models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'game_skeleton.HeroClass')), ('user', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='heroes', to=\n 'class_room.User'))]), migrations.CreateModel(name='EventHistory',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('object_id', models.\n PositiveIntegerField()), ('is_draft', models.BooleanField(default=\n False, help_text=\n 'Draft note does not participate in hero capacity calculation.')),\n ('datetime_created', models.DateTimeField(auto_now=True)), (\n 'datetime_edited', models.DateTimeField(auto_now_add=True)), (\n 'author', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='actions', to=\n 'class_room.User')), ('content_type', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, related_name='events', to='class_room.User'))], options={\n 'verbose_name_plural': 'User`s history events'})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('game_skeleton', '0001_initial'), ('contenttypes',\n '0002_remove_content_type_name'), ('class_room', '0001_initial')]\n operations = [migrations.CreateModel(name='UserHero', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('datetime_created', models.\n DateTimeField(auto_now=True)), ('datetime_edited', models.\n DateTimeField(auto_now_add=True)), ('datetime_finished', models.\n DateTimeField(blank=True, null=True)), ('capacity', models.\n FloatField()), ('wallet', models.DecimalField(decimal_places=4,\n default=0.0, max_digits=10)), ('hero_class', models.OneToOneField(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'game_skeleton.HeroClass')), ('user', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='heroes', to=\n 'class_room.User'))]), migrations.CreateModel(name='EventHistory',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('object_id', models.\n PositiveIntegerField()), ('is_draft', models.BooleanField(default=\n False, help_text=\n 'Draft note does not participate in hero capacity calculation.')),\n ('datetime_created', models.DateTimeField(auto_now=True)), (\n 'datetime_edited', models.DateTimeField(auto_now_add=True)), (\n 'author', models.OneToOneField(null=True, on_delete=django.db.\n models.deletion.SET_NULL, related_name='actions', to=\n 'class_room.User')), ('content_type', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.\n CASCADE, related_name='events', to='class_room.User'))], options={\n 'verbose_name_plural': 'User`s history events'})]\n",
"step-5": "# Generated by Django 3.0 on 2020-05-04 16:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('game_skeleton', '0001_initial'),\n ('contenttypes', '0002_remove_content_type_name'),\n ('class_room', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserHero',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('datetime_created', models.DateTimeField(auto_now=True)),\n ('datetime_edited', models.DateTimeField(auto_now_add=True)),\n ('datetime_finished', models.DateTimeField(blank=True, null=True)),\n ('capacity', models.FloatField()),\n ('wallet', models.DecimalField(decimal_places=4, default=0.0, max_digits=10)),\n ('hero_class', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='game_skeleton.HeroClass')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='heroes', to='class_room.User')),\n ],\n ),\n migrations.CreateModel(\n name='EventHistory',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('object_id', models.PositiveIntegerField()),\n ('is_draft', models.BooleanField(default=False, help_text='Draft note does not participate in hero capacity calculation.')),\n ('datetime_created', models.DateTimeField(auto_now=True)),\n ('datetime_edited', models.DateTimeField(auto_now_add=True)),\n ('author', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='actions', to='class_room.User')),\n ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),\n ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='events', to='class_room.User')),\n ],\n options={\n 'verbose_name_plural': 'User`s history events',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
class Task(models.Model):
level = models.PositiveSmallIntegerField()
topic = models.CharField(max_length=100)
content = models.TextField()
correct_answer = models.CharField(max_length=50)
class Answer(models.Model):
content = models.TextField()
user = models.CharField(max_length = 100, null = True)
task = models.ForeignKey(
'Task',
on_delete=models.CASCADE,
)
|
normal
|
{
"blob_id": "06e01dce7e2342be994569099ed51d1fe28eea1c",
"index": 5784,
"step-1": "<mask token>\n\n\nclass Answer(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Task(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length=100, null=True)\n task = models.ForeignKey('Task', on_delete=models.CASCADE)\n",
"step-3": "<mask token>\n\n\nclass Task(models.Model):\n level = models.PositiveSmallIntegerField()\n topic = models.CharField(max_length=100)\n content = models.TextField()\n correct_answer = models.CharField(max_length=50)\n\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length=100, null=True)\n task = models.ForeignKey('Task', on_delete=models.CASCADE)\n",
"step-4": "from django.db import models\n\n\nclass Task(models.Model):\n level = models.PositiveSmallIntegerField()\n topic = models.CharField(max_length=100)\n content = models.TextField()\n correct_answer = models.CharField(max_length=50)\n\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length=100, null=True)\n task = models.ForeignKey('Task', on_delete=models.CASCADE)\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass Task(models.Model):\n level = models.PositiveSmallIntegerField()\n topic = models.CharField(max_length=100)\n content = models.TextField()\n correct_answer = models.CharField(max_length=50)\n\nclass Answer(models.Model):\n content = models.TextField()\n user = models.CharField(max_length = 100, null = True)\n task = models.ForeignKey(\n 'Task',\n on_delete=models.CASCADE,\n )\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from __future__ import annotations
from .base import * # noqa
SECRET_KEY = "django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": "puka-test",
"USER": "jeff",
"PASSWORD": "",
"HOST": "127.0.0.1",
"PORT": "5432",
},
}
|
normal
|
{
"blob_id": "2432e2b4da8af284055e7edf6e0bd94b7b293f0b",
"index": 8601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSECRET_KEY = (\n 'django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn')\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME':\n 'puka-test', 'USER': 'jeff', 'PASSWORD': '', 'HOST': '127.0.0.1',\n 'PORT': '5432'}}\n",
"step-3": "from __future__ import annotations\nfrom .base import *\nSECRET_KEY = (\n 'django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn')\nDATABASES = {'default': {'ENGINE': 'django.db.backends.postgresql', 'NAME':\n 'puka-test', 'USER': 'jeff', 'PASSWORD': '', 'HOST': '127.0.0.1',\n 'PORT': '5432'}}\n",
"step-4": "from __future__ import annotations\n\nfrom .base import * # noqa\n\nSECRET_KEY = \"django-insecure-usp0sg081f=9+_j95j@-k^sfp+9c*!qrwh-m17%=_9^xot#9fn\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"puka-test\",\n \"USER\": \"jeff\",\n \"PASSWORD\": \"\",\n \"HOST\": \"127.0.0.1\",\n \"PORT\": \"5432\",\n },\n}\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
numpy.random.seed(2)
<|reserved_special_token_0|>
plt.scatter(x, y)
plt.title('Original dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
<|reserved_special_token_0|>
plt.scatter(train_x, train_y)
plt.title('Train dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
plt.scatter(test_x, test_y)
plt.title('Test dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
numpy.random.seed(2)
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x
plt.scatter(x, y)
plt.title('Original dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
train_x = x[:80]
train_y = y[:80]
test_x = x[80:]
test_y = y[80:]
plt.scatter(train_x, train_y)
plt.title('Train dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
plt.scatter(test_x, test_y)
plt.title('Test dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
<|reserved_special_token_1|>
import numpy
import matplotlib.pyplot as plt
numpy.random.seed(2)
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x
plt.scatter(x, y)
plt.title('Original dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
train_x = x[:80]
train_y = y[:80]
test_x = x[80:]
test_y = y[80:]
plt.scatter(train_x, train_y)
plt.title('Train dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
plt.scatter(test_x, test_y)
plt.title('Test dataset')
plt.xlabel('Minutes')
plt.ylabel('Spent money')
plt.show()
<|reserved_special_token_1|>
import numpy
import matplotlib.pyplot as plt
numpy.random.seed(2)
# create datasets
x = numpy.random.normal(3, 1, 100)
y = numpy.random.normal(150, 40, 100) / x
# displaying original dataset
plt.scatter(x, y)
plt.title("Original dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# train dataset will be 80% of the data
train_x = x[:80]
train_y = y[:80]
# test dataset will be remaining 20% of the data
test_x = x[80:]
test_y = y[80:]
# displaying train dataset
plt.scatter(train_x, train_y)
plt.title("Train dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
# displaying test dataset
plt.scatter(test_x, test_y)
plt.title("Test dataset")
plt.xlabel("Minutes")
plt.ylabel("Spent money")
plt.show()
|
flexible
|
{
"blob_id": "9fd985e9675514f6c8f3ac5b91962eb744e0e82c",
"index": 6514,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnumpy.random.seed(2)\n<mask token>\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n<mask token>\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-3": "<mask token>\nnumpy.random.seed(2)\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\ntrain_x = x[:80]\ntrain_y = y[:80]\ntest_x = x[80:]\ntest_y = y[80:]\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-4": "import numpy\nimport matplotlib.pyplot as plt\nnumpy.random.seed(2)\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\nplt.scatter(x, y)\nplt.title('Original dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\ntrain_x = x[:80]\ntrain_y = y[:80]\ntest_x = x[80:]\ntest_y = y[80:]\nplt.scatter(train_x, train_y)\nplt.title('Train dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\nplt.scatter(test_x, test_y)\nplt.title('Test dataset')\nplt.xlabel('Minutes')\nplt.ylabel('Spent money')\nplt.show()\n",
"step-5": "import numpy\nimport matplotlib.pyplot as plt\n\nnumpy.random.seed(2)\n\n# create datasets\nx = numpy.random.normal(3, 1, 100)\ny = numpy.random.normal(150, 40, 100) / x\n\n# displaying original dataset\nplt.scatter(x, y)\nplt.title(\"Original dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n\n# train dataset will be 80% of the data\ntrain_x = x[:80]\ntrain_y = y[:80]\n\n# test dataset will be remaining 20% of the data\ntest_x = x[80:]\ntest_y = y[80:]\n\n# displaying train dataset\nplt.scatter(train_x, train_y)\nplt.title(\"Train dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n\n# displaying test dataset\nplt.scatter(test_x, test_y)\nplt.title(\"Test dataset\")\nplt.xlabel(\"Minutes\")\nplt.ylabel(\"Spent money\")\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import tempfile
import zipfile
from contextlib import contextmanager
from utils import (
codepipeline_lambda_handler,
create_zip_file,
get_artifact_s3_client,
get_cloudformation_template,
get_input_artifact_location,
get_output_artifact_location,
get_session,
get_user_parameters,
log,
)
@codepipeline_lambda_handler
def lambda_handler(event, context):
"""
Prepares for an AMI deployment.
"""
# Get details from the event.
job = event["CodePipeline.job"]
input_bucket, input_key = get_input_artifact_location(job)
output_bucket, output_key = get_output_artifact_location(job)
user_params = get_user_parameters(job)
assume_role_arn = user_params["AssumeRoleArn"]
image_parameter_name = user_params["ImageParameterName"]
stack_name = user_params["StackName"]
template_filename = user_params["TemplateFilename"]
# Create client in the pipeline account.
pipeline_s3_client = get_artifact_s3_client(job)
# Create clients in the target account.
target_session = get_session(
role_arn=assume_role_arn, session_name="prepare-ami-deployment"
)
target_cfn_client = target_session.client("cloudformation")
target_ssm_client = target_session.client("ssm")
    # Download the input artifact zip file, read imageDetail.json from it,
    # and get the image URI it references.
with download_zip_file(
s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key
) as zip_file:
image_detail_string = zip_file.read("imageDetail.json").decode("utf-8")
log("IMAGE_DETAIL_STRING", image_detail_string)
image_detail = json.loads(image_detail_string)
image = image_detail["ImageURI"]
log("IMAGE", image)
# Update the SSM parameters with the image,
# to be used by the CloudFormation deployment stage of the pipeline.
target_ssm_client.put_parameter(
Name=image_parameter_name, Value=image, Type="String", Overwrite=True
)
# Write the CloudFormation stack's template to the output artifact location,
# to be used by the CloudFormation deployment stage of the pipeline.
template = get_cloudformation_template(
cfn_client=target_cfn_client, stack_name=stack_name
)
with create_zip_file({template_filename: template}) as zip_path:
pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)
@contextmanager
def download_zip_file(s3_client, bucket, key):
"""
Downloads and extracts a zip file from S3.
"""
    with tempfile.NamedTemporaryFile() as temp_file:
s3_client.download_file(bucket, key, temp_file.name)
with zipfile.ZipFile(temp_file.name, "r") as zip_file:
yield zip_file
|
normal
|
{
"blob_id": "4c59e5fab2469af3f40cafaac226a993f6628290",
"index": 3624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n job = event['CodePipeline.job']\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params['AssumeRoleArn']\n image_parameter_name = user_params['ImageParameterName']\n stack_name = user_params['StackName']\n template_filename = user_params['TemplateFilename']\n pipeline_s3_client = get_artifact_s3_client(job)\n target_session = get_session(role_arn=assume_role_arn, session_name=\n 'prepare-ami-deployment')\n target_cfn_client = target_session.client('cloudformation')\n target_ssm_client = target_session.client('ssm')\n with download_zip_file(s3_client=pipeline_s3_client, bucket=\n input_bucket, key=input_key) as zip_file:\n image_detail_string = zip_file.read('imageDetail.json').decode('utf-8')\n log('IMAGE_DETAIL_STRING', image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail['ImageURI']\n log('IMAGE', image)\n target_ssm_client.put_parameter(Name=image_parameter_name, Value=image,\n Type='String', Overwrite=True)\n template = get_cloudformation_template(cfn_client=target_cfn_client,\n stack_name=stack_name)\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n job = event['CodePipeline.job']\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params['AssumeRoleArn']\n image_parameter_name = user_params['ImageParameterName']\n stack_name = user_params['StackName']\n template_filename = user_params['TemplateFilename']\n pipeline_s3_client = get_artifact_s3_client(job)\n target_session = get_session(role_arn=assume_role_arn, session_name=\n 'prepare-ami-deployment')\n target_cfn_client = target_session.client('cloudformation')\n target_ssm_client = target_session.client('ssm')\n with download_zip_file(s3_client=pipeline_s3_client, bucket=\n input_bucket, key=input_key) as zip_file:\n image_detail_string = zip_file.read('imageDetail.json').decode('utf-8')\n log('IMAGE_DETAIL_STRING', image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail['ImageURI']\n log('IMAGE', image)\n target_ssm_client.put_parameter(Name=image_parameter_name, Value=image,\n Type='String', Overwrite=True)\n template = get_cloudformation_template(cfn_client=target_cfn_client,\n stack_name=stack_name)\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n@contextmanager\ndef download_zip_file(s3_client, bucket, key):\n \"\"\"\n Downloads and extracts a zip file from S3.\n\n \"\"\"\n temp_file = tempfile.NamedTemporaryFile()\n with tempfile.NamedTemporaryFile() as temp_file:\n s3_client.download_file(bucket, key, temp_file.name)\n with zipfile.ZipFile(temp_file.name, 'r') as zip_file:\n yield zip_file\n",
"step-4": "import json\nimport tempfile\nimport zipfile\nfrom contextlib import contextmanager\nfrom utils import codepipeline_lambda_handler, create_zip_file, get_artifact_s3_client, get_cloudformation_template, get_input_artifact_location, get_output_artifact_location, get_session, get_user_parameters, log\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n job = event['CodePipeline.job']\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params['AssumeRoleArn']\n image_parameter_name = user_params['ImageParameterName']\n stack_name = user_params['StackName']\n template_filename = user_params['TemplateFilename']\n pipeline_s3_client = get_artifact_s3_client(job)\n target_session = get_session(role_arn=assume_role_arn, session_name=\n 'prepare-ami-deployment')\n target_cfn_client = target_session.client('cloudformation')\n target_ssm_client = target_session.client('ssm')\n with download_zip_file(s3_client=pipeline_s3_client, bucket=\n input_bucket, key=input_key) as zip_file:\n image_detail_string = zip_file.read('imageDetail.json').decode('utf-8')\n log('IMAGE_DETAIL_STRING', image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail['ImageURI']\n log('IMAGE', image)\n target_ssm_client.put_parameter(Name=image_parameter_name, Value=image,\n Type='String', Overwrite=True)\n template = get_cloudformation_template(cfn_client=target_cfn_client,\n stack_name=stack_name)\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n@contextmanager\ndef download_zip_file(s3_client, bucket, key):\n \"\"\"\n Downloads and extracts a zip file from S3.\n\n \"\"\"\n temp_file = tempfile.NamedTemporaryFile()\n with tempfile.NamedTemporaryFile() as temp_file:\n s3_client.download_file(bucket, key, temp_file.name)\n with zipfile.ZipFile(temp_file.name, 'r') as zip_file:\n yield zip_file\n",
"step-5": "import json\nimport tempfile\nimport zipfile\nfrom contextlib import contextmanager\n\nfrom utils import (\n codepipeline_lambda_handler,\n create_zip_file,\n get_artifact_s3_client,\n get_cloudformation_template,\n get_input_artifact_location,\n get_output_artifact_location,\n get_session,\n get_user_parameters,\n log,\n)\n\n\n@codepipeline_lambda_handler\ndef lambda_handler(event, context):\n \"\"\"\n Prepares for an AMI deployment.\n\n \"\"\"\n\n # Get details from the event.\n job = event[\"CodePipeline.job\"]\n input_bucket, input_key = get_input_artifact_location(job)\n output_bucket, output_key = get_output_artifact_location(job)\n user_params = get_user_parameters(job)\n assume_role_arn = user_params[\"AssumeRoleArn\"]\n image_parameter_name = user_params[\"ImageParameterName\"]\n stack_name = user_params[\"StackName\"]\n template_filename = user_params[\"TemplateFilename\"]\n\n # Create client in the pipeline account.\n pipeline_s3_client = get_artifact_s3_client(job)\n\n # Create clients in the target account.\n target_session = get_session(\n role_arn=assume_role_arn, session_name=\"prepare-ami-deployment\"\n )\n target_cfn_client = target_session.client(\"cloudformation\")\n target_ssm_client = target_session.client(\"ssm\")\n\n # Download the input artifact zip file, read manifest.json from it,\n # and get the AMI it references. Also look up the associated image name.\n with download_zip_file(\n s3_client=pipeline_s3_client, bucket=input_bucket, key=input_key\n ) as zip_file:\n image_detail_string = zip_file.read(\"imageDetail.json\").decode(\"utf-8\")\n log(\"IMAGE_DETAIL_STRING\", image_detail_string)\n image_detail = json.loads(image_detail_string)\n image = image_detail[\"ImageURI\"]\n log(\"IMAGE\", image)\n\n # Update the SSM parameters with the image,\n # to be used by the CloudFormation deployment stage of the pipeline.\n target_ssm_client.put_parameter(\n Name=image_parameter_name, Value=image, Type=\"String\", Overwrite=True\n )\n\n # Write the CloudFormation stack's template to the output artifact location,\n # to be used by the CloudFormation deployment stage of the pipeline.\n template = get_cloudformation_template(\n cfn_client=target_cfn_client, stack_name=stack_name\n )\n with create_zip_file({template_filename: template}) as zip_path:\n pipeline_s3_client.upload_file(zip_path, output_bucket, output_key)\n\n\n@contextmanager\ndef download_zip_file(s3_client, bucket, key):\n \"\"\"\n Downloads and extracts a zip file from S3.\n\n \"\"\"\n\n temp_file = tempfile.NamedTemporaryFile()\n with tempfile.NamedTemporaryFile() as temp_file:\n s3_client.download_file(bucket, key, temp_file.name)\n with zipfile.ZipFile(temp_file.name, \"r\") as zip_file:\n yield zip_file\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
#
# Display all tags in the specified file for neoview.
# Author: Andrew Pyatkov <[email protected]>
# License: MIT
#
"""
Display all tags in the specified file for neoview.
Output: {file_name}\t{tag_address}\t{displayable_tag_info}
"""
import argparse
#import os
#import re
import subprocess
# Parse command line args.
parser = argparse.ArgumentParser()
parser.add_argument("file", help="File name to display tags from")
args = parser.parse_args()
filename = args.file
# Colors for the output, see for more info:
# https://en.wikipedia.org/wiki/ANSI_escape_code#3/4_bit
COLOR_TAGTYPE = '\033[1;35m'
COLOR_TAGNAME = ''
COLOR_COMMENT = '\033[0;32m'
COLOR_BAR = '\033[0;37m'
COLOR_RESET = '\033[m'
# Contains lists of [file_name, tag_address, tag_name, comment].
# 'file_name' is relative to the current directory.
# 'tag_address' can be a number or a "/^line$/".
tags = []
# Max length of a tag name.
max_tag_len = 0
cmd = 'ctags -f - --excmd=number %s' % filename
result = subprocess.check_output(cmd, shell=True)
out = result.decode("utf-8", errors="ignore").rstrip().split("\n")
def displayable_info(tagname, comment):
cs = comment.split("\t", 1)
return ('{}{:<' + str(max_tag_len) + '}{} {}|{}{}{}|{} {}{}{}').\
format(
COLOR_TAGNAME, tagname, COLOR_RESET,
COLOR_BAR, COLOR_TAGTYPE, cs[0], COLOR_BAR, COLOR_RESET,
COLOR_COMMENT, cs[1] if len(cs) == 2 else "", COLOR_RESET)
for l in out:
# t[0] - tag name, t[1] - file name, t[2] - tag address and comment
t = l.split("\t", 2)
max_tag_len = max(max_tag_len, len(t[0]))
# info[0] - tag address, info[1] - comment
info = t[2].split(';"')
tags.append([t[1], info[0], t[0], info[1].strip()])
for t in tags:
print('%s\t%s\t%s' %
(t[0], t[1], displayable_info(t[2], t[3])))
|
normal
|
{
"blob_id": "b220cacc2530ca62b5599a9c1894e979bcfd5109",
"index": 9633,
"step-1": "<mask token>\n\n\ndef displayable_info(tagname, comment):\n cs = comment.split('\\t', 1)\n return ('{}{:<' + str(max_tag_len) + '}{} {}|{}{}{}|{} {}{}{}').format(\n COLOR_TAGNAME, tagname, COLOR_RESET, COLOR_BAR, COLOR_TAGTYPE, cs[0\n ], COLOR_BAR, COLOR_RESET, COLOR_COMMENT, cs[1] if len(cs) == 2 else\n '', COLOR_RESET)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('file', help='File name to display tags from')\n<mask token>\n\n\ndef displayable_info(tagname, comment):\n cs = comment.split('\\t', 1)\n return ('{}{:<' + str(max_tag_len) + '}{} {}|{}{}{}|{} {}{}{}').format(\n COLOR_TAGNAME, tagname, COLOR_RESET, COLOR_BAR, COLOR_TAGTYPE, cs[0\n ], COLOR_BAR, COLOR_RESET, COLOR_COMMENT, cs[1] if len(cs) == 2 else\n '', COLOR_RESET)\n\n\nfor l in out:\n t = l.split('\\t', 2)\n max_tag_len = max(max_tag_len, len(t[0]))\n info = t[2].split(';\"')\n tags.append([t[1], info[0], t[0], info[1].strip()])\nfor t in tags:\n print('%s\\t%s\\t%s' % (t[0], t[1], displayable_info(t[2], t[3])))\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser()\nparser.add_argument('file', help='File name to display tags from')\nargs = parser.parse_args()\nfilename = args.file\nCOLOR_TAGTYPE = '\\x1b[1;35m'\nCOLOR_TAGNAME = ''\nCOLOR_COMMENT = '\\x1b[0;32m'\nCOLOR_BAR = '\\x1b[0;37m'\nCOLOR_RESET = '\\x1b[m'\ntags = []\nmax_tag_len = 0\ncmd = 'ctags -f - --excmd=number %s' % filename\nresult = subprocess.check_output(cmd, shell=True)\nout = result.decode('utf-8', errors='ignore').rstrip().split('\\n')\n\n\ndef displayable_info(tagname, comment):\n cs = comment.split('\\t', 1)\n return ('{}{:<' + str(max_tag_len) + '}{} {}|{}{}{}|{} {}{}{}').format(\n COLOR_TAGNAME, tagname, COLOR_RESET, COLOR_BAR, COLOR_TAGTYPE, cs[0\n ], COLOR_BAR, COLOR_RESET, COLOR_COMMENT, cs[1] if len(cs) == 2 else\n '', COLOR_RESET)\n\n\nfor l in out:\n t = l.split('\\t', 2)\n max_tag_len = max(max_tag_len, len(t[0]))\n info = t[2].split(';\"')\n tags.append([t[1], info[0], t[0], info[1].strip()])\nfor t in tags:\n print('%s\\t%s\\t%s' % (t[0], t[1], displayable_info(t[2], t[3])))\n",
"step-4": "<mask token>\nimport argparse\nimport subprocess\nparser = argparse.ArgumentParser()\nparser.add_argument('file', help='File name to display tags from')\nargs = parser.parse_args()\nfilename = args.file\nCOLOR_TAGTYPE = '\\x1b[1;35m'\nCOLOR_TAGNAME = ''\nCOLOR_COMMENT = '\\x1b[0;32m'\nCOLOR_BAR = '\\x1b[0;37m'\nCOLOR_RESET = '\\x1b[m'\ntags = []\nmax_tag_len = 0\ncmd = 'ctags -f - --excmd=number %s' % filename\nresult = subprocess.check_output(cmd, shell=True)\nout = result.decode('utf-8', errors='ignore').rstrip().split('\\n')\n\n\ndef displayable_info(tagname, comment):\n cs = comment.split('\\t', 1)\n return ('{}{:<' + str(max_tag_len) + '}{} {}|{}{}{}|{} {}{}{}').format(\n COLOR_TAGNAME, tagname, COLOR_RESET, COLOR_BAR, COLOR_TAGTYPE, cs[0\n ], COLOR_BAR, COLOR_RESET, COLOR_COMMENT, cs[1] if len(cs) == 2 else\n '', COLOR_RESET)\n\n\nfor l in out:\n t = l.split('\\t', 2)\n max_tag_len = max(max_tag_len, len(t[0]))\n info = t[2].split(';\"')\n tags.append([t[1], info[0], t[0], info[1].strip()])\nfor t in tags:\n print('%s\\t%s\\t%s' % (t[0], t[1], displayable_info(t[2], t[3])))\n",
"step-5": "#!/usr/bin/env python3\n#\n# Display all tags in the specified file for neoview.\n# Author: Andrew Pyatkov <[email protected]>\n# License: MIT\n#\n\"\"\"\nDisplay all tags in the specified file for neoview.\nOutput: {file_name}\\t{tag_address}\\t{displayable_tag_info}\n\"\"\"\nimport argparse\n#import os\n#import re\nimport subprocess\n\n# Parse command line args.\nparser = argparse.ArgumentParser()\nparser.add_argument(\"file\", help=\"File name to display tags from\")\nargs = parser.parse_args()\n\nfilename = args.file\n\n# Colors for the output, see for more info:\n# https://en.wikipedia.org/wiki/ANSI_escape_code#3/4_bit\nCOLOR_TAGTYPE = '\\033[1;35m'\nCOLOR_TAGNAME = ''\nCOLOR_COMMENT = '\\033[0;32m'\nCOLOR_BAR = '\\033[0;37m'\nCOLOR_RESET = '\\033[m'\n\n# Contains lists of [file_name, tag_address, tag_name, comment].\n# 'file_name' is relative to the current directory.\n# 'tag_address' can be a number or a \"/^line$/\".\ntags = []\n\n# Max length of a tag name.\nmax_tag_len = 0\n\ncmd = 'ctags -f - --excmd=number %s' % filename\n\nresult = subprocess.check_output(cmd, shell=True)\nout = result.decode(\"utf-8\", errors=\"ignore\").rstrip().split(\"\\n\")\n\n\ndef displayable_info(tagname, comment):\n cs = comment.split(\"\\t\", 1)\n return ('{}{:<' + str(max_tag_len) + '}{} {}|{}{}{}|{} {}{}{}').\\\n format(\n COLOR_TAGNAME, tagname, COLOR_RESET,\n COLOR_BAR, COLOR_TAGTYPE, cs[0], COLOR_BAR, COLOR_RESET,\n COLOR_COMMENT, cs[1] if len(cs) == 2 else \"\", COLOR_RESET)\n\nfor l in out:\n # t[0] - tag name, t[1] - file name, t[2] - tag address and comment\n t = l.split(\"\\t\", 2)\n max_tag_len = max(max_tag_len, len(t[0]))\n # info[0] - tag address, info[1] - comment\n info = t[2].split(';\"')\n tags.append([t[1], info[0], t[0], info[1].strip()])\n\nfor t in tags:\n print('%s\\t%s\\t%s' %\n (t[0], t[1], displayable_info(t[2], t[3])))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 18 20:24:53 2020
@author: filip
"""
import re
texto = "Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia."
texto1 = ['Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido', ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia']
def separa_sentencas(texto):
'''A funcao recebe um texto e devolve uma lista das sentencas dentro do texto'''
sentencas = re.split(r'[.!?]+', texto)
if sentencas[-1] == '':
del sentencas[-1]
return sentencas
def separa_frases(sentenca):
'''A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca'''
sentenca = re.split(r'[,:;]+', sentenca)
return sentenca
def separa_palavras(frase):
'''A funcao recebe uma frase e devolve uma lista das palavras dentro da frase'''
return frase.split()
def n_palavras_unicas(lista_palavras):
'''Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez'''
freq = dict()
unicas = 0
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
if freq[p] == 1:
unicas -= 1
freq[p] += 1
else:
freq[p] = 1
unicas += 1
return unicas
def n_palavras_diferentes(lista_palavras):
'''Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas'''
freq = dict()
for palavra in lista_palavras:
p = palavra.lower()
if p in freq:
freq[p] += 1
else:
freq[p] = 1
return len(freq)
def lista_frases (sentenca):
list_frases = []
list_sent = separa_sentencas(sentenca)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def lista_palavras (frases):
list_palavras = []
list_fr = lista_frases(frases)
for frase in list_fr:
novas_palavras = separa_palavras(frase)
list_palavras.extend(novas_palavras)
return list_palavras
def tam_medio (list_palavras): # Traço linguístico 1
palavras = lista_palavras(texto)
i = 0
soma_palavras = 0
while i < len(palavras):
x = palavras[i]
soma_palavras = soma_palavras + len(x)
i +=1
tam = soma_palavras/len(palavras)
return tam
def type_token(list_palavras): # Traço linguístico 2
palavras = lista_palavras(texto)
TT = n_palavras_diferentes(palavras)/ len(palavras)
return TT
def hapax_legomana (list_palavras): # Traço linguístico 3
palavras = lista_palavras(texto)
HL = n_palavras_unicas(palavras)/ len(palavras)
return HL
def soma_caracteres_sentenca(lista_sent):
lista_sent = separa_sentencas(texto)
i = 0
soma = 0
while i < len(lista_sent):
x = lista_sent[i]
len(x)
soma = soma + len(x)
i +=1
return soma
def tam_medio_sentenca(lista_sent): # Traço linguístico 4
TMS = soma_caracteres_sentenca(lista_sent)/ len(separa_sentencas(lista_sent))
return TMS
def frases (sentenca):
list_frases = []
list_sent = separa_sentencas(texto)
for sent in list_sent:
novas_frases = separa_frases(sent)
list_frases.extend(novas_frases)
return list_frases
def complexidade_sentenca (texto): # Traço linguístico 5
CS = len(frases(texto))/ len(separa_sentencas(texto))
return CS
def soma_caracteres_frases(lista_frases):
lista_fr = frases(lista_frases)
i = 0
soma_fr = 0
while i < len(lista_fr):
x = lista_fr[i]
len(x)
soma_fr = soma_fr + len(x)
i +=1
return soma_fr
def tam_medio_frase(lista_frases): # Traço linguístico 6
TMF = soma_caracteres_frases(lista_frases)/ len (frases(lista_frases))
return TMF
def le_textos():
'''A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento'''
i = 1
textos = []
texto = input("Digite o texto " + str(i) +" (aperte enter para sair):")
while texto:
textos.append(texto)
i += 1
texto = input("Digite o texto " + str(i) +" (aperte enter para sair):")
return textos
def compara_assinatura(as_a, as_b):
'''IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.'''
i = 0
soma = 0
for i in range(6):
soma += abs (as_a[i] - as_b[i])
Sab = soma / 6
return Sab
def calcula_assinatura(texto):
as_b = []
    as_b.append(tam_medio(texto))
    as_b.append(type_token(texto))
    as_b.append(hapax_legomana(texto))
    as_b.append(tam_medio_sentenca(texto))
    as_b.append(complexidade_sentenca(texto))
    as_b.append(tam_medio_frase(texto))
return as_b
def avalia_textos(textos, ass_cp):
'''IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.'''
lista_sab = []
menor = 0
for texto in textos:
as_texto = calcula_assinatura(texto)
comparar = compara_assinatura(ass_cp, as_texto)
lista_sab.append(comparar)
menor = min(lista_sab)
    return lista_sab.index(menor) + 1
|
normal
|
{
"blob_id": "e207063eb3eb1929e0e24b62e6b77a8924a80489",
"index": 1001,
"step-1": "<mask token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\n<mask token>\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\n<mask token>\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\n<mask token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\ndef tam_medio(list_palavras):\n palavras = lista_palavras(texto)\n i = 0\n soma_palavras = 0\n while i < len(palavras):\n x = palavras[i]\n soma_palavras = soma_palavras + len(x)\n i += 1\n tam = soma_palavras / len(palavras)\n return tam\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<mask token>\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\n<mask token>\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"step-3": "<mask token>\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\ndef tam_medio(list_palavras):\n palavras = lista_palavras(texto)\n i = 0\n soma_palavras = 0\n while i < len(palavras):\n x = palavras[i]\n soma_palavras = soma_palavras + len(x)\n i += 1\n tam = soma_palavras / len(palavras)\n return tam\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\n<mask token>\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. 
Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"step-4": "<mask token>\nimport re\ntexto = (\n 'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia.'\n )\ntexto1 = [\n 'Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido'\n ,\n ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia'\n ]\n\n\ndef separa_sentencas(texto):\n \"\"\"A funcao recebe um texto e devolve uma lista das sentencas dentro do texto\"\"\"\n sentencas = re.split('[.!?]+', texto)\n if sentencas[-1] == '':\n del sentencas[-1]\n return sentencas\n\n\ndef separa_frases(sentenca):\n \"\"\"A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca\"\"\"\n sentenca = re.split('[,:;]+', sentenca)\n return sentenca\n\n\ndef separa_palavras(frase):\n \"\"\"A funcao recebe uma frase e devolve uma lista das palavras dentro da frase\"\"\"\n return frase.split()\n\n\ndef n_palavras_unicas(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez\"\"\"\n freq = dict()\n unicas = 0\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n if freq[p] == 1:\n unicas -= 1\n freq[p] += 1\n else:\n freq[p] = 1\n unicas += 1\n return unicas\n\n\ndef n_palavras_diferentes(lista_palavras):\n \"\"\"Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas\"\"\"\n freq = dict()\n for palavra in lista_palavras:\n p = palavra.lower()\n if p in freq:\n freq[p] += 1\n else:\n freq[p] = 1\n return len(freq)\n\n\ndef lista_frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(sentenca)\n for sent in list_sent:\n novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef lista_palavras(frases):\n list_palavras = []\n list_fr = lista_frases(frases)\n for frase in list_fr:\n novas_palavras = separa_palavras(frase)\n list_palavras.extend(novas_palavras)\n return list_palavras\n\n\ndef tam_medio(list_palavras):\n palavras = lista_palavras(texto)\n i = 0\n soma_palavras = 0\n while i < len(palavras):\n x = palavras[i]\n soma_palavras = soma_palavras + len(x)\n i += 1\n tam = soma_palavras / len(palavras)\n return tam\n\n\ndef type_token(list_palavras):\n palavras = lista_palavras(texto)\n TT = n_palavras_diferentes(palavras) / len(palavras)\n return TT\n\n\ndef hapax_legomana(list_palavras):\n palavras = lista_palavras(texto)\n HL = n_palavras_unicas(palavras) / len(palavras)\n return HL\n\n\ndef soma_caracteres_sentenca(lista_sent):\n lista_sent = separa_sentencas(texto)\n i = 0\n soma = 0\n while i < len(lista_sent):\n x = lista_sent[i]\n len(x)\n soma = soma + len(x)\n i += 1\n return soma\n\n\ndef tam_medio_sentenca(lista_sent):\n TMS = soma_caracteres_sentenca(lista_sent) / len(separa_sentencas(\n lista_sent))\n return TMS\n\n\ndef frases(sentenca):\n list_frases = []\n list_sent = separa_sentencas(texto)\n for sent in list_sent:\n 
novas_frases = separa_frases(sent)\n list_frases.extend(novas_frases)\n return list_frases\n\n\ndef complexidade_sentenca(texto):\n CS = len(frases(texto)) / len(separa_sentencas(texto))\n return CS\n\n\ndef soma_caracteres_frases(lista_frases):\n lista_fr = frases(lista_frases)\n i = 0\n soma_fr = 0\n while i < len(lista_fr):\n x = lista_fr[i]\n len(x)\n soma_fr = soma_fr + len(x)\n i += 1\n return soma_fr\n\n\ndef tam_medio_frase(lista_frases):\n TMF = soma_caracteres_frases(lista_frases) / len(frases(lista_frases))\n return TMF\n\n\ndef le_textos():\n \"\"\"A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento\"\"\"\n i = 1\n textos = []\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):')\n while texto:\n textos.append(texto)\n i += 1\n texto = input('Digite o texto ' + str(i) + ' (aperte enter para sair):'\n )\n return textos\n\n\ndef compara_assinatura(as_a, as_b):\n \"\"\"IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.\"\"\"\n i = 0\n soma = 0\n for i in range(6):\n soma += abs(as_a[i] - as_b[i])\n Sab = soma / 6\n return Sab\n\n\ndef calcula_assinatura(texto):\n as_b = []\n lista.append(tam_medio(texto))\n lista.append(type_token(texto))\n lista.append(hapax_legomana(texto))\n lista.append(tam_medio_sentenca(texto))\n lista.append(complexidade_sentenca(texto))\n lista.append(tam_medio_frase(texto))\n return as_b\n\n\ndef avalia_textos(textos, ass_cp):\n \"\"\"IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.\"\"\"\n lista_sab = []\n menor = 0\n for texto in textos:\n as_texto = calcula_assinatura(texto)\n comparar = compara_assinatura(ass_cp, as_texto)\n lista_sab.append(comparar)\n menor = min(lista_sab)\n return lista.index(menor) + 1\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 18 20:24:53 2020\r\n\r\n@author: filip\r\n\"\"\"\r\n\r\nimport re\r\n\r\ntexto = \"Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido. Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia.\"\r\ntexto1 = ['Muito além, nos confins inexplorados da região mais brega da Borda Ocidental desta Galáxia, há um pequeno sol amarelo e esquecido', ' Girando em torno deste sol, a uma distancia de cerca de 148 milhões de quilômetros, há um planetinha verde-azulado absolutamente insignificante, cujas formas de vida, descendentes de primatas, são tão extraordinariamente primitivas que ainda acham que relógios digitais são uma grande ideia']\r\n\r\n\r\ndef separa_sentencas(texto):\r\n '''A funcao recebe um texto e devolve uma lista das sentencas dentro do texto'''\r\n sentencas = re.split(r'[.!?]+', texto)\r\n if sentencas[-1] == '':\r\n del sentencas[-1]\r\n return sentencas\r\n\r\ndef separa_frases(sentenca):\r\n '''A funcao recebe uma sentenca e devolve uma lista das frases dentro da sentenca'''\r\n sentenca = re.split(r'[,:;]+', sentenca)\r\n return sentenca\r\n \r\ndef separa_palavras(frase):\r\n '''A funcao recebe uma frase e devolve uma lista das palavras dentro da frase'''\r\n return frase.split()\r\n\r\ndef n_palavras_unicas(lista_palavras):\r\n '''Essa funcao recebe uma lista de palavras e devolve o numero de palavras que aparecem uma unica vez'''\r\n freq = dict()\r\n unicas = 0\r\n for palavra in lista_palavras:\r\n p = palavra.lower()\r\n if p in freq:\r\n if freq[p] == 1:\r\n unicas -= 1\r\n freq[p] += 1\r\n else:\r\n freq[p] = 1\r\n unicas += 1\r\n\r\n return unicas\r\n\r\ndef n_palavras_diferentes(lista_palavras):\r\n '''Essa funcao recebe uma lista de palavras e devolve o numero de palavras diferentes utilizadas'''\r\n freq = dict()\r\n for palavra in lista_palavras:\r\n p = palavra.lower()\r\n if p in freq:\r\n freq[p] += 1\r\n else:\r\n freq[p] = 1\r\n\r\n return len(freq)\r\n\r\n\r\ndef lista_frases (sentenca):\r\n list_frases = []\r\n list_sent = separa_sentencas(sentenca)\r\n for sent in list_sent:\r\n novas_frases = separa_frases(sent)\r\n list_frases.extend(novas_frases)\r\n return list_frases\r\n \r\ndef lista_palavras (frases):\r\n list_palavras = []\r\n list_fr = lista_frases(frases)\r\n for frase in list_fr:\r\n novas_palavras = separa_palavras(frase)\r\n list_palavras.extend(novas_palavras)\r\n return list_palavras\r\n \r\n\r\n\r\n\r\ndef tam_medio (list_palavras): # Traço linguístico 1\r\n palavras = lista_palavras(texto)\r\n i = 0\r\n soma_palavras = 0\r\n while i < len(palavras):\r\n x = palavras[i]\r\n soma_palavras = soma_palavras + len(x)\r\n i +=1\r\n tam = soma_palavras/len(palavras)\r\n return tam\r\n\r\ndef type_token(list_palavras): # Traço linguístico 2\r\n palavras = lista_palavras(texto)\r\n TT = n_palavras_diferentes(palavras)/ len(palavras)\r\n return TT\r\n\r\ndef hapax_legomana (list_palavras): # Traço linguístico 3\r\n palavras = lista_palavras(texto)\r\n HL = n_palavras_unicas(palavras)/ len(palavras)\r\n return HL\r\n\r\n\r\ndef soma_caracteres_sentenca(lista_sent):\r\n lista_sent = separa_sentencas(texto)\r\n i = 0\r\n soma = 0\r\n while i < len(lista_sent):\r\n x = 
lista_sent[i]\r\n len(x)\r\n soma = soma + len(x)\r\n i +=1\r\n return soma\r\n\r\ndef tam_medio_sentenca(lista_sent): # Traço linguístico 4\r\n TMS = soma_caracteres_sentenca(lista_sent)/ len(separa_sentencas(lista_sent))\r\n return TMS\r\n\r\ndef frases (sentenca):\r\n list_frases = []\r\n list_sent = separa_sentencas(texto)\r\n for sent in list_sent:\r\n novas_frases = separa_frases(sent)\r\n list_frases.extend(novas_frases)\r\n return list_frases\r\n \r\n\r\ndef complexidade_sentenca (texto): # Traço linguístico 5\r\n CS = len(frases(texto))/ len(separa_sentencas(texto))\r\n return CS\r\n\r\n\r\ndef soma_caracteres_frases(lista_frases):\r\n lista_fr = frases(lista_frases)\r\n i = 0\r\n soma_fr = 0\r\n while i < len(lista_fr):\r\n x = lista_fr[i]\r\n len(x)\r\n soma_fr = soma_fr + len(x)\r\n i +=1\r\n return soma_fr\r\n\r\ndef tam_medio_frase(lista_frases): # Traço linguístico 6\r\n TMF = soma_caracteres_frases(lista_frases)/ len (frases(lista_frases))\r\n\r\n return TMF\r\n\r\ndef le_textos():\r\n '''A funcao le todos os textos a serem comparados e devolve uma lista contendo cada texto como um elemento'''\r\n i = 1\r\n textos = []\r\n texto = input(\"Digite o texto \" + str(i) +\" (aperte enter para sair):\")\r\n while texto:\r\n textos.append(texto)\r\n i += 1\r\n texto = input(\"Digite o texto \" + str(i) +\" (aperte enter para sair):\")\r\n\r\n return textos\r\n\r\n\r\ndef compara_assinatura(as_a, as_b):\r\n '''IMPLEMENTAR. Essa funcao recebe duas assinaturas de texto e deve devolver o grau de similaridade nas assinaturas.'''\r\n i = 0\r\n soma = 0\r\n for i in range(6):\r\n soma += abs (as_a[i] - as_b[i])\r\n Sab = soma / 6\r\n return Sab\r\n\r\ndef calcula_assinatura(texto):\r\n as_b = []\r\n lista.append(tam_medio(texto))\r\n lista.append(type_token(texto))\r\n lista.append(hapax_legomana (texto))\r\n lista.append(tam_medio_sentenca(texto))\r\n lista.append(complexidade_sentenca (texto))\r\n lista.append(tam_medio_frase(texto))\r\n return as_b\r\n\r\ndef avalia_textos(textos, ass_cp):\r\n '''IMPLEMENTAR. Essa funcao recebe uma lista de textos e uma assinatura ass_cp e deve devolver o numero (1 a n) do texto com maior probabilidade de ter sido infectado por COH-PIAH.'''\r\n lista_sab = []\r\n menor = 0\r\n for texto in textos:\r\n as_texto = calcula_assinatura(texto)\r\n comparar = compara_assinatura(ass_cp, as_texto)\r\n lista_sab.append(comparar)\r\n menor = min(lista_sab)\r\n return (lista.index(menor) + 1)\r\n\r\n\r\n\r\n\r\n",
"step-ids": [
6,
17,
19,
22,
23
]
}
|
[
6,
17,
19,
22,
23
] |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
import lightgbm as lgb
from typing import List, Text, Tuple, Union
from ...model.base import ModelFT
from ...data.dataset import DatasetH
from ...data.dataset.handler import DataHandlerLP
from ...model.interpret.base import LightGBMFInt
from ...data.dataset.weight import Reweighter
from qlib.workflow import R
class LGBModel(ModelFT, LightGBMFInt):
"""LightGBM Model"""
def __init__(self, loss="mse", early_stopping_rounds=50, num_boost_round=1000, **kwargs):
if loss not in {"mse", "binary"}:
raise NotImplementedError
self.params = {"objective": loss, "verbosity": -1}
self.params.update(kwargs)
self.early_stopping_rounds = early_stopping_rounds
self.num_boost_round = num_boost_round
self.model = None
def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:
"""
The motivation of current version is to make validation optional
- train segment is necessary;
"""
ds_l = []
assert "train" in dataset.segments
for key in ["train", "valid"]:
if key in dataset.segments:
df = dataset.prepare(key, col_set=["feature", "label"], data_key=DataHandlerLP.DK_L)
if df.empty:
raise ValueError("Empty data from dataset, please check your dataset config.")
x, y = df["feature"], df["label"]
# Lightgbm need 1D array as its label
if y.values.ndim == 2 and y.values.shape[1] == 1:
y = np.squeeze(y.values)
else:
raise ValueError("LightGBM doesn't support multi-label training")
if reweighter is None:
w = None
elif isinstance(reweighter, Reweighter):
w = reweighter.reweight(df)
else:
raise ValueError("Unsupported reweighter type.")
ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))
return ds_l
def fit(
self,
dataset: DatasetH,
num_boost_round=None,
early_stopping_rounds=None,
verbose_eval=20,
evals_result=None,
reweighter=None,
**kwargs,
):
if evals_result is None:
evals_result = {} # in case of unsafety of Python default values
ds_l = self._prepare_data(dataset, reweighter)
ds, names = list(zip(*ds_l))
early_stopping_callback = lgb.early_stopping(
self.early_stopping_rounds if early_stopping_rounds is None else early_stopping_rounds
)
# NOTE: if you encounter error here. Please upgrade your lightgbm
verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
evals_result_callback = lgb.record_evaluation(evals_result)
self.model = lgb.train(
self.params,
ds[0], # training dataset
num_boost_round=self.num_boost_round if num_boost_round is None else num_boost_round,
valid_sets=ds,
valid_names=names,
callbacks=[early_stopping_callback, verbose_eval_callback, evals_result_callback],
**kwargs,
)
for k in names:
for key, val in evals_result[k].items():
name = f"{key}.{k}"
for epoch, m in enumerate(val):
R.log_metrics(**{name.replace("@", "_"): m}, step=epoch)
def predict(self, dataset: DatasetH, segment: Union[Text, slice] = "test"):
if self.model is None:
raise ValueError("model is not fitted yet!")
x_test = dataset.prepare(segment, col_set="feature", data_key=DataHandlerLP.DK_I)
return pd.Series(self.model.predict(x_test.values), index=x_test.index)
def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20, reweighter=None):
"""
finetune model
Parameters
----------
dataset : DatasetH
dataset for finetuning
num_boost_round : int
number of round to finetune model
verbose_eval : int
verbose level
"""
# Based on existing model and finetune by train more rounds
dtrain, _ = self._prepare_data(dataset, reweighter) # pylint: disable=W0632
if dtrain.empty:
raise ValueError("Empty data from dataset, please check your dataset config.")
verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)
self.model = lgb.train(
self.params,
dtrain,
num_boost_round=num_boost_round,
init_model=self.model,
valid_sets=[dtrain],
valid_names=["train"],
callbacks=[verbose_eval_callback],
)
|
normal
|
{
"blob_id": "d37187f067ddff94015e639a1759dddced817945",
"index": 6205,
"step-1": "<mask token>\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n <mask token>\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n <mask token>\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-2": "<mask token>\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n <mask token>\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice]='test'):\n if self.model is None:\n raise ValueError('model is not fitted yet!')\n x_test = dataset.prepare(segment, col_set='feature', data_key=\n DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-3": "<mask token>\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n \"\"\"LightGBM Model\"\"\"\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice]='test'):\n if self.model is None:\n raise ValueError('model is not fitted yet!')\n x_test = dataset.prepare(segment, col_set='feature', data_key=\n DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom typing import List, Text, Tuple, Union\nfrom ...model.base import ModelFT\nfrom ...data.dataset import DatasetH\nfrom ...data.dataset.handler import DataHandlerLP\nfrom ...model.interpret.base import LightGBMFInt\nfrom ...data.dataset.weight import Reweighter\nfrom qlib.workflow import R\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n \"\"\"LightGBM Model\"\"\"\n\n def __init__(self, loss='mse', early_stopping_rounds=50,\n num_boost_round=1000, **kwargs):\n if loss not in {'mse', 'binary'}:\n raise NotImplementedError\n self.params = {'objective': loss, 'verbosity': -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) ->List[Tuple\n [lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert 'train' in dataset.segments\n for key in ['train', 'valid']:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=['feature', 'label'],\n data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.'\n )\n x, y = df['feature'], df['label']\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\n \"LightGBM doesn't support multi-label training\")\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError('Unsupported reweighter type.')\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(self, dataset: DatasetH, num_boost_round=None,\n early_stopping_rounds=None, verbose_eval=20, evals_result=None,\n reweighter=None, **kwargs):\n if evals_result is None:\n evals_result = {}\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(self.\n early_stopping_rounds if early_stopping_rounds is None else\n early_stopping_rounds)\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(self.params, ds[0], num_boost_round=self.\n num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds, valid_names=names, callbacks=[\n early_stopping_callback, verbose_eval_callback,\n evals_result_callback], **kwargs)\n for k in names:\n for key, val in evals_result[k].items():\n name = f'{key}.{k}'\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace('@', '_'): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice]='test'):\n if self.model is None:\n raise ValueError('model is not fitted yet!')\n x_test = dataset.prepare(segment, col_set='feature', data_key=\n DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=\n 20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n dtrain, _ = self._prepare_data(dataset, reweighter)\n if dtrain.empty:\n raise ValueError(\n 'Empty data from dataset, please check your dataset config.')\n verbose_eval_callback = 
lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(self.params, dtrain, num_boost_round=\n num_boost_round, init_model=self.model, valid_sets=[dtrain],\n valid_names=['train'], callbacks=[verbose_eval_callback])\n",
"step-5": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgb\nfrom typing import List, Text, Tuple, Union\nfrom ...model.base import ModelFT\nfrom ...data.dataset import DatasetH\nfrom ...data.dataset.handler import DataHandlerLP\nfrom ...model.interpret.base import LightGBMFInt\nfrom ...data.dataset.weight import Reweighter\nfrom qlib.workflow import R\n\n\nclass LGBModel(ModelFT, LightGBMFInt):\n \"\"\"LightGBM Model\"\"\"\n\n def __init__(self, loss=\"mse\", early_stopping_rounds=50, num_boost_round=1000, **kwargs):\n if loss not in {\"mse\", \"binary\"}:\n raise NotImplementedError\n self.params = {\"objective\": loss, \"verbosity\": -1}\n self.params.update(kwargs)\n self.early_stopping_rounds = early_stopping_rounds\n self.num_boost_round = num_boost_round\n self.model = None\n\n def _prepare_data(self, dataset: DatasetH, reweighter=None) -> List[Tuple[lgb.Dataset, str]]:\n \"\"\"\n The motivation of current version is to make validation optional\n - train segment is necessary;\n \"\"\"\n ds_l = []\n assert \"train\" in dataset.segments\n for key in [\"train\", \"valid\"]:\n if key in dataset.segments:\n df = dataset.prepare(key, col_set=[\"feature\", \"label\"], data_key=DataHandlerLP.DK_L)\n if df.empty:\n raise ValueError(\"Empty data from dataset, please check your dataset config.\")\n x, y = df[\"feature\"], df[\"label\"]\n\n # Lightgbm need 1D array as its label\n if y.values.ndim == 2 and y.values.shape[1] == 1:\n y = np.squeeze(y.values)\n else:\n raise ValueError(\"LightGBM doesn't support multi-label training\")\n\n if reweighter is None:\n w = None\n elif isinstance(reweighter, Reweighter):\n w = reweighter.reweight(df)\n else:\n raise ValueError(\"Unsupported reweighter type.\")\n ds_l.append((lgb.Dataset(x.values, label=y, weight=w), key))\n return ds_l\n\n def fit(\n self,\n dataset: DatasetH,\n num_boost_round=None,\n early_stopping_rounds=None,\n verbose_eval=20,\n evals_result=None,\n reweighter=None,\n **kwargs,\n ):\n if evals_result is None:\n evals_result = {} # in case of unsafety of Python default values\n ds_l = self._prepare_data(dataset, reweighter)\n ds, names = list(zip(*ds_l))\n early_stopping_callback = lgb.early_stopping(\n self.early_stopping_rounds if early_stopping_rounds is None else early_stopping_rounds\n )\n # NOTE: if you encounter error here. 
Please upgrade your lightgbm\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n evals_result_callback = lgb.record_evaluation(evals_result)\n self.model = lgb.train(\n self.params,\n ds[0], # training dataset\n num_boost_round=self.num_boost_round if num_boost_round is None else num_boost_round,\n valid_sets=ds,\n valid_names=names,\n callbacks=[early_stopping_callback, verbose_eval_callback, evals_result_callback],\n **kwargs,\n )\n for k in names:\n for key, val in evals_result[k].items():\n name = f\"{key}.{k}\"\n for epoch, m in enumerate(val):\n R.log_metrics(**{name.replace(\"@\", \"_\"): m}, step=epoch)\n\n def predict(self, dataset: DatasetH, segment: Union[Text, slice] = \"test\"):\n if self.model is None:\n raise ValueError(\"model is not fitted yet!\")\n x_test = dataset.prepare(segment, col_set=\"feature\", data_key=DataHandlerLP.DK_I)\n return pd.Series(self.model.predict(x_test.values), index=x_test.index)\n\n def finetune(self, dataset: DatasetH, num_boost_round=10, verbose_eval=20, reweighter=None):\n \"\"\"\n finetune model\n\n Parameters\n ----------\n dataset : DatasetH\n dataset for finetuning\n num_boost_round : int\n number of round to finetune model\n verbose_eval : int\n verbose level\n \"\"\"\n # Based on existing model and finetune by train more rounds\n dtrain, _ = self._prepare_data(dataset, reweighter) # pylint: disable=W0632\n if dtrain.empty:\n raise ValueError(\"Empty data from dataset, please check your dataset config.\")\n verbose_eval_callback = lgb.log_evaluation(period=verbose_eval)\n self.model = lgb.train(\n self.params,\n dtrain,\n num_boost_round=num_boost_round,\n init_model=self.model,\n valid_sets=[dtrain],\n valid_names=[\"train\"],\n callbacks=[verbose_eval_callback],\n )\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('', views.skincare, name='skin'), path('productSearch/',
views.productSearch, name='productSearch'), path('detail/', views.
detail, name='detail')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [path('', views.skincare, name='skin'), path('productSearch/',
views.productSearch, name='productSearch'), path('detail/', views.
detail, name='detail')]
<|reserved_special_token_1|>
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.skincare, name="skin"),
path('productSearch/', views.productSearch, name="productSearch"),
path('detail/', views.detail, name="detail"),
]
|
flexible
|
{
"blob_id": "c31c59d172b2b23ca4676be0690603f33b56f557",
"index": 4867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.skincare, name='skin'), path('productSearch/',\n views.productSearch, name='productSearch'), path('detail/', views.\n detail, name='detail')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nurlpatterns = [path('', views.skincare, name='skin'), path('productSearch/',\n views.productSearch, name='productSearch'), path('detail/', views.\n detail, name='detail')]\n",
"step-4": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.skincare, name=\"skin\"),\n path('productSearch/', views.productSearch, name=\"productSearch\"),\n path('detail/', views.detail, name=\"detail\"),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MyTestCase(FSQLFlyTestCase):
def test_positive_delete(self):
namespace = Namespace(name='iii')
self.session.add(namespace)
self.session.commit()
t = Transform(name='test', sql='select 1;', namespace=namespace)
self.session.add(t)
self.session.commit()
self.session.delete(namespace)
self.session.commit()
self.assertEqual(self.session.query(Transform).count(), 0)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_positive_delete_other(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.session.delete(schema)
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 1)
def test_get_connection_and_resource_name_config(self):
connection_config = '\n[jdbc]\ninsert_primary_key = false\n\n '
resource_name_config = '\n[jdbc]\ninsert_primary_key = true\n '
connection = Connection(name='a', url='#', type='hive', connector=
'text', config=connection_config)
schema = SchemaEvent(name='test', connection=connection)
r_name = ResourceName(name='b', full_name='a.b', connection=
connection, schema_version=schema, config=resource_name_config)
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertEqual(connection.get_config('read_partition_num', 'jdbc',
int), 50)
self.assertTrue(r_name.get_config('example11') is None)
self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
self.assertTrue(not connection.get_config('insert_primary_key',
'jdbc', bool))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyTestCase(FSQLFlyTestCase):
def test_positive_delete(self):
namespace = Namespace(name='iii')
self.session.add(namespace)
self.session.commit()
t = Transform(name='test', sql='select 1;', namespace=namespace)
self.session.add(t)
self.session.commit()
self.session.delete(namespace)
self.session.commit()
self.assertEqual(self.session.query(Transform).count(), 0)
def get_create_object(self):
connection = Connection(name='a', url='#', type='hive', connector=
'text')
schema = SchemaEvent(name='test', connection=connection, version=1)
schema2 = SchemaEvent(name='test2', connection=connection, version=2)
r_name = ResourceName(name='b', full_name='a.b', connection=
connection, schema_version=schema)
t_name = ResourceTemplate(name='c', resource_name=r_name, type=
'both', full_name='a.b.c', connection=connection,
schema_version=schema)
v_name = ResourceVersion(name='d', template=t_name, full_name=
'a.b.c.d', connection=connection, resource_name=r_name,
schema_version=schema)
return connection, schema, schema2, r_name, t_name, v_name
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_positive_delete_other(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.session.delete(schema)
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 1)
def test_get_connection_and_resource_name_config(self):
connection_config = '\n[jdbc]\ninsert_primary_key = false\n\n '
resource_name_config = '\n[jdbc]\ninsert_primary_key = true\n '
connection = Connection(name='a', url='#', type='hive', connector=
'text', config=connection_config)
schema = SchemaEvent(name='test', connection=connection)
r_name = ResourceName(name='b', full_name='a.b', connection=
connection, schema_version=schema, config=resource_name_config)
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertEqual(connection.get_config('read_partition_num', 'jdbc',
int), 50)
self.assertTrue(r_name.get_config('example11') is None)
self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
self.assertTrue(not connection.get_config('insert_primary_key',
'jdbc', bool))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyTestCase(FSQLFlyTestCase):
def test_positive_delete(self):
namespace = Namespace(name='iii')
self.session.add(namespace)
self.session.commit()
t = Transform(name='test', sql='select 1;', namespace=namespace)
self.session.add(t)
self.session.commit()
self.session.delete(namespace)
self.session.commit()
self.assertEqual(self.session.query(Transform).count(), 0)
def get_create_object(self):
connection = Connection(name='a', url='#', type='hive', connector=
'text')
schema = SchemaEvent(name='test', connection=connection, version=1)
schema2 = SchemaEvent(name='test2', connection=connection, version=2)
r_name = ResourceName(name='b', full_name='a.b', connection=
connection, schema_version=schema)
t_name = ResourceTemplate(name='c', resource_name=r_name, type=
'both', full_name='a.b.c', connection=connection,
schema_version=schema)
v_name = ResourceVersion(name='d', template=t_name, full_name=
'a.b.c.d', connection=connection, resource_name=r_name,
schema_version=schema)
return connection, schema, schema2, r_name, t_name, v_name
def test_positive_delete_connection(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.session.delete(connection)
self.session.commit()
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_connection_by_db_helper(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
DBSession.init_engine(self.engine)
with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):
res = DBDao.delete('connection', pk=connection.id)
self.assertEqual(res.success, True)
self.session.close()
self.session = self.get_session()
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_other(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.session.delete(schema)
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 1)
def test_get_connection_and_resource_name_config(self):
connection_config = '\n[jdbc]\ninsert_primary_key = false\n\n '
resource_name_config = '\n[jdbc]\ninsert_primary_key = true\n '
connection = Connection(name='a', url='#', type='hive', connector=
'text', config=connection_config)
schema = SchemaEvent(name='test', connection=connection)
r_name = ResourceName(name='b', full_name='a.b', connection=
connection, schema_version=schema, config=resource_name_config)
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertEqual(connection.get_config('read_partition_num', 'jdbc',
int), 50)
self.assertTrue(r_name.get_config('example11') is None)
self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
self.assertTrue(not connection.get_config('insert_primary_key',
'jdbc', bool))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import unittest
from unittest.mock import patch
from fsqlfly.db_helper import *
from fsqlfly.tests.base_test import FSQLFlyTestCase
class MyTestCase(FSQLFlyTestCase):
def test_positive_delete(self):
namespace = Namespace(name='iii')
self.session.add(namespace)
self.session.commit()
t = Transform(name='test', sql='select 1;', namespace=namespace)
self.session.add(t)
self.session.commit()
self.session.delete(namespace)
self.session.commit()
self.assertEqual(self.session.query(Transform).count(), 0)
def get_create_object(self):
connection = Connection(name='a', url='#', type='hive', connector=
'text')
schema = SchemaEvent(name='test', connection=connection, version=1)
schema2 = SchemaEvent(name='test2', connection=connection, version=2)
r_name = ResourceName(name='b', full_name='a.b', connection=
connection, schema_version=schema)
t_name = ResourceTemplate(name='c', resource_name=r_name, type=
'both', full_name='a.b.c', connection=connection,
schema_version=schema)
v_name = ResourceVersion(name='d', template=t_name, full_name=
'a.b.c.d', connection=connection, resource_name=r_name,
schema_version=schema)
return connection, schema, schema2, r_name, t_name, v_name
def test_positive_delete_connection(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.session.delete(connection)
self.session.commit()
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_connection_by_db_helper(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
DBSession.init_engine(self.engine)
with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):
res = DBDao.delete('connection', pk=connection.id)
self.assertEqual(res.success, True)
self.session.close()
self.session = self.get_session()
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_other(self):
connection, schema, schema2, r_name, t_name, v_name = (self.
get_create_object())
self.session.add_all([connection, schema, schema2, r_name, t_name,
v_name])
self.session.commit()
self.session.delete(schema)
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 1)
def test_get_connection_and_resource_name_config(self):
connection_config = '\n[jdbc]\ninsert_primary_key = false\n\n '
resource_name_config = '\n[jdbc]\ninsert_primary_key = true\n '
connection = Connection(name='a', url='#', type='hive', connector=
'text', config=connection_config)
schema = SchemaEvent(name='test', connection=connection)
r_name = ResourceName(name='b', full_name='a.b', connection=
connection, schema_version=schema, config=resource_name_config)
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertTrue(not r_name.get_config('add_read_partition_key',
'jdbc', bool))
self.assertEqual(connection.get_config('read_partition_num', 'jdbc',
int), 50)
self.assertTrue(r_name.get_config('example11') is None)
self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
self.assertTrue(not connection.get_config('insert_primary_key',
'jdbc', bool))
if __name__ == '__main__':
unittest.main()
<|reserved_special_token_1|>
import unittest
from unittest.mock import patch
from fsqlfly.db_helper import *
from fsqlfly.tests.base_test import FSQLFlyTestCase
class MyTestCase(FSQLFlyTestCase):
def test_positive_delete(self):
namespace = Namespace(name='iii')
self.session.add(namespace)
self.session.commit()
t = Transform(name='test', sql='select 1;', namespace=namespace)
self.session.add(t)
self.session.commit()
self.session.delete(namespace)
self.session.commit()
self.assertEqual(self.session.query(Transform).count(), 0)
def get_create_object(self):
connection = Connection(name='a', url='#', type='hive', connector='text')
schema = SchemaEvent(name='test', connection=connection, version=1)
schema2 = SchemaEvent(name='test2', connection=connection, version=2)
r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema)
t_name = ResourceTemplate(name='c', resource_name=r_name, type='both', full_name='a.b.c', connection=connection,
schema_version=schema)
v_name = ResourceVersion(name='d', template=t_name, full_name='a.b.c.d', connection=connection,
resource_name=r_name, schema_version=schema)
return connection, schema, schema2, r_name, t_name, v_name
def test_positive_delete_connection(self):
connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
self.session.commit()
self.session.delete(connection)
self.session.commit()
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_connection_by_db_helper(self):
connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
DBSession.init_engine(self.engine)
with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):
res = DBDao.delete('connection', pk=connection.id)
self.assertEqual(res.success, True)
self.session.close()
self.session = self.get_session()
self.assertEqual(self.session.query(Connection).count(), 0)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 0)
def test_positive_delete_other(self):
connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()
self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])
self.session.commit()
self.session.delete(schema)
self.session.commit()
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceName).count(), 0)
self.assertEqual(self.session.query(Connection).count(), 1)
self.assertEqual(self.session.query(ResourceVersion).count(), 0)
self.assertEqual(self.session.query(ResourceTemplate).count(), 0)
self.assertEqual(self.session.query(SchemaEvent).count(), 1)
def test_get_connection_and_resource_name_config(self):
connection_config = """
[jdbc]
insert_primary_key = false
"""
resource_name_config = """
[jdbc]
insert_primary_key = true
"""
connection = Connection(name='a', url='#', type='hive', connector='text', config=connection_config)
schema = SchemaEvent(name='test', connection=connection)
r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema,
config=resource_name_config)
self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))
self.assertEqual(connection.get_config('read_partition_num', 'jdbc', int), 50)
self.assertTrue(r_name.get_config('example11') is None)
self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))
self.assertTrue(not connection.get_config('insert_primary_key', 'jdbc', bool))
if __name__ == '__main__':
unittest.main()
|
flexible
|
{
"blob_id": "abbefb1e426408b32fa9e125c78b572de22dbb8c",
"index": 7493,
"step-1": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n <mask token>\n <mask token>\n <mask token>\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n <mask token>\n <mask token>\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', 
type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\n<mask token>\n",
"step-4": "import unittest\nfrom unittest.mock import patch\nfrom fsqlfly.db_helper import *\nfrom fsqlfly.tests.base_test import FSQLFlyTestCase\n\n\nclass MyTestCase(FSQLFlyTestCase):\n\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type=\n 'both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name=\n 'a.b.c.d', connection=connection, resource_name=r_name,\n schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = (self.\n get_create_object())\n self.session.add_all([connection, schema, schema2, r_name, t_name,\n v_name])\n self.session.commit()\n self.session.delete(schema)\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = '\\n[jdbc]\\ninsert_primary_key = 
false\\n\\n '\n resource_name_config = '\\n[jdbc]\\ninsert_primary_key = true\\n '\n connection = Connection(name='a', url='#', type='hive', connector=\n 'text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=\n connection, schema_version=schema, config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key',\n 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc',\n int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key',\n 'jdbc', bool))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom unittest.mock import patch\nfrom fsqlfly.db_helper import *\nfrom fsqlfly.tests.base_test import FSQLFlyTestCase\n\n\nclass MyTestCase(FSQLFlyTestCase):\n def test_positive_delete(self):\n namespace = Namespace(name='iii')\n self.session.add(namespace)\n self.session.commit()\n\n t = Transform(name='test', sql='select 1;', namespace=namespace)\n self.session.add(t)\n self.session.commit()\n self.session.delete(namespace)\n self.session.commit()\n self.assertEqual(self.session.query(Transform).count(), 0)\n\n def get_create_object(self):\n connection = Connection(name='a', url='#', type='hive', connector='text')\n schema = SchemaEvent(name='test', connection=connection, version=1)\n schema2 = SchemaEvent(name='test2', connection=connection, version=2)\n r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema)\n t_name = ResourceTemplate(name='c', resource_name=r_name, type='both', full_name='a.b.c', connection=connection,\n schema_version=schema)\n v_name = ResourceVersion(name='d', template=t_name, full_name='a.b.c.d', connection=connection,\n resource_name=r_name, schema_version=schema)\n return connection, schema, schema2, r_name, t_name, v_name\n\n def test_positive_delete_connection(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.session.delete(connection)\n self.session.commit()\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 0)\n\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_connection_by_db_helper(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n DBSession.init_engine(self.engine)\n with patch.object(settings, 'FSQLFLY_SAVE_MODE_DISABLE', True):\n res = DBDao.delete('connection', pk=connection.id)\n self.assertEqual(res.success, True)\n self.session.close()\n self.session = self.get_session()\n self.assertEqual(self.session.query(Connection).count(), 0)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 0)\n\n def test_positive_delete_other(self):\n connection, schema, schema2, r_name, t_name, v_name = self.get_create_object()\n\n self.session.add_all([connection, schema, schema2, r_name, t_name, v_name])\n self.session.commit()\n self.session.delete(schema)\n\n self.session.commit()\n self.assertEqual(self.session.query(Connection).count(), 1)\n self.assertEqual(self.session.query(ResourceName).count(), 0)\n self.assertEqual(self.session.query(Connection).count(), 1)\n\n self.assertEqual(self.session.query(ResourceVersion).count(), 0)\n self.assertEqual(self.session.query(ResourceTemplate).count(), 0)\n self.assertEqual(self.session.query(SchemaEvent).count(), 1)\n\n def test_get_connection_and_resource_name_config(self):\n connection_config = \"\"\"\n[jdbc]\ninsert_primary_key = false\n\n \"\"\"\n 
resource_name_config = \"\"\"\n[jdbc]\ninsert_primary_key = true\n \"\"\"\n connection = Connection(name='a', url='#', type='hive', connector='text', config=connection_config)\n schema = SchemaEvent(name='test', connection=connection)\n r_name = ResourceName(name='b', full_name='a.b', connection=connection, schema_version=schema,\n config=resource_name_config)\n self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))\n self.assertTrue(not r_name.get_config('add_read_partition_key', 'jdbc', bool))\n self.assertEqual(connection.get_config('read_partition_num', 'jdbc', int), 50)\n self.assertTrue(r_name.get_config('example11') is None)\n\n self.assertTrue(r_name.get_config('insert_primary_key', 'jdbc', bool))\n self.assertTrue(not connection.get_config('insert_primary_key', 'jdbc', bool))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
4,
5,
7,
9,
10
]
}
|
[
4,
5,
7,
9,
10
] |
<|reserved_special_token_0|>
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size
=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,
hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.img_output_dim = None
self.drop_path_prob = 0.0
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def extract_feature(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def sub_forward(self, x):
x = self.extract_feature(x)
x = self.fc1(x)
x = torch.sigmoid(x)
return x
def forward(self, x0, x1):
x0 = self.sub_forward(x0)
if self.img_output_dim is None:
self.img_output_dim = x0.shape[1]
x1 = self.sub_forward(x1)
diff = torch.abs(x0 - x1)
scores = self.fc2(diff)
scores = torch.reshape(scores, (-1,))
return scores
class MLP_classifier(nn.Module):
def __init__(self, in_dim, hidden_dim=512, out_dim=10):
super(MLP_classifier, self).__init__()
self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc3 = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
x = x.detach()
out = self.fc1(x)
out = self.fc2(out)
out = self.fc3(out)
return out
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicBlock(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size
=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,
hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.img_output_dim = None
self.drop_path_prob = 0.0
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def extract_feature(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def sub_forward(self, x):
x = self.extract_feature(x)
x = self.fc1(x)
x = torch.sigmoid(x)
return x
def forward(self, x0, x1):
x0 = self.sub_forward(x0)
if self.img_output_dim is None:
self.img_output_dim = x0.shape[1]
x1 = self.sub_forward(x1)
diff = torch.abs(x0 - x1)
scores = self.fc2(diff)
scores = torch.reshape(scores, (-1,))
return scores
class MLP_classifier(nn.Module):
def __init__(self, in_dim, hidden_dim=512, out_dim=10):
super(MLP_classifier, self).__init__()
self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc3 = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
x = x.detach()
out = self.fc1(x)
out = self.fc2(out)
out = self.fc3(out)
return out
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size
=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,
hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.img_output_dim = None
self.drop_path_prob = 0.0
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def extract_feature(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def sub_forward(self, x):
x = self.extract_feature(x)
x = self.fc1(x)
x = torch.sigmoid(x)
return x
def forward(self, x0, x1):
x0 = self.sub_forward(x0)
if self.img_output_dim is None:
self.img_output_dim = x0.shape[1]
x1 = self.sub_forward(x1)
diff = torch.abs(x0 - x1)
scores = self.fc2(diff)
scores = torch.reshape(scores, (-1,))
return scores
class MLP_classifier(nn.Module):
def __init__(self, in_dim, hidden_dim=512, out_dim=10):
super(MLP_classifier, self).__init__()
self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc3 = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
x = x.detach()
out = self.fc1(x)
out = self.fc2(out)
out = self.fc3(out)
return out
<|reserved_special_token_0|>
def ResNet152(in_plane):
return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=
stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size
=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=stride, bias=
False), nn.BatchNorm2d(self.expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,
hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.img_output_dim = None
self.drop_path_prob = 0.0
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def extract_feature(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def sub_forward(self, x):
x = self.extract_feature(x)
x = self.fc1(x)
x = torch.sigmoid(x)
return x
def forward(self, x0, x1):
x0 = self.sub_forward(x0)
if self.img_output_dim is None:
self.img_output_dim = x0.shape[1]
x1 = self.sub_forward(x1)
diff = torch.abs(x0 - x1)
scores = self.fc2(diff)
scores = torch.reshape(scores, (-1,))
return scores
class MLP_classifier(nn.Module):
def __init__(self, in_dim, hidden_dim=512, out_dim=10):
super(MLP_classifier, self).__init__()
self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))
self.fc3 = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
x = x.detach()
out = self.fc1(x)
out = self.fc2(out)
out = self.fc3(out)
return out
def ResNet18(in_plane):
    return ResNet(in_plane, BasicBlock, [2, 2, 2, 2])
def ResNet34(in_plane):
return ResNet(in_plane, BasicBlock, [3, 4, 6, 3])
def ResNet50(in_plane):
return ResNet(in_plane, Bottleneck, [3, 4, 6, 3])
def ResNet101(in_plane):
return ResNet(in_plane, Bottleneck, [3, 4, 23, 3])
def ResNet152(in_plane):
return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes,
kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes,
kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
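        # Projection shortcut (1x1 conv + BN) when the block changes spatial size or channel count; identity otherwise.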
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
# print(out.shape)
out = self.bn2(self.conv2(out))
# print(out.shape)
out += self.shortcut(x)
# print(out.shape)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
# print(out.shape)
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc1 = nn.Sequential(
nn.Linear(512 * block.expansion, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True)
)
self.fc2 = nn.Linear(hidden_dim, out_dim)
self.img_output_dim = None
self.drop_path_prob = 0.0
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
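        # Zero-init the last BN weight in each residual branch so every block starts near identity; this usually helps early training.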
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
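    # Each layer group stacks num_blocks residual blocks; only the first uses the requested stride, the rest keep stride 1.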
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
# print(strides)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
# print(nn.Sequential(*layers))
return nn.Sequential(*layers)
def extract_feature(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
        # Global average pooling instead of the original F.avg_pool2d(out, 4):
        # the fixed 4x4 kernel only matches fc1 for 32x32 inputs and would crash
        # on the 64x64 tensors used in the __main__ demo below.
        out = F.adaptive_avg_pool2d(out, 1)
out = out.view(out.size(0), -1)
# print(out.shape)
return out
def sub_forward(self, x):
x = self.extract_feature(x)
# print(x.shape)
x = self.fc1(x)
# print(x.shape)
x = torch.sigmoid(x)
# print(x.shape)
return x
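    # Siamese comparison: both inputs go through the same backbone; fc2 maps the absolute difference of the two embeddings to a single score.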
def forward(self, x0, x1):
x0 = self.sub_forward(x0)
if self.img_output_dim is None:
self.img_output_dim = x0.shape[1]
x1 = self.sub_forward(x1)
diff = torch.abs(x0 - x1)
scores = self.fc2(diff)
scores = torch.reshape(scores, (-1,))
# print(scores.shape)
return scores
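# Auxiliary classifier head; it trains on detached features, so no gradient flows back into the backbone.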
class MLP_classifier(nn.Module):
def __init__(self, in_dim, hidden_dim=512, out_dim=10):
super(MLP_classifier, self).__init__()
self.fc1 = nn.Sequential(
nn.Linear(in_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True)
)
self.fc2 = nn.Sequential(
nn.Linear(hidden_dim, hidden_dim),
nn.BatchNorm1d(hidden_dim),
nn.ReLU(inplace=True)
)
self.fc3 = nn.Linear(hidden_dim, out_dim)
def forward(self, x):
x = x.detach()
out = self.fc1(x)
out = self.fc2(out)
out = self.fc3(out)
return out
def ResNet18(in_plane):
    return ResNet(in_plane, BasicBlock, [2, 2, 2, 2])
def ResNet34(in_plane):
return ResNet(in_plane, BasicBlock, [3, 4, 6, 3])
def ResNet50(in_plane):
return ResNet(in_plane, Bottleneck, [3, 4, 6, 3])
def ResNet101(in_plane):
return ResNet(in_plane, Bottleneck, [3, 4, 23, 3])
def ResNet152(in_plane):
return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])
if __name__ == '__main__':
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
device = torch.device(device)
x0 = torch.rand(128, 1, 64, 64).to(device)
net = ResNet34(1).to(device)
out = net(x0, x0)
print(out)
|
flexible
|
{
"blob_id": "d3f42f329246164cdb6113df3da0eb2d3203b2a9",
"index": 7114,
"step-1": "<mask token>\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = 
x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BasicBlock(nn.Module):\n <mask token>\n <mask token>\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n 
BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=\n stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n 
def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n<mask token>\n\n\ndef ResNet152(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=\n stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size\n =1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.\n expansion * planes, kernel_size=1, stride=stride, bias=\n False), nn.BatchNorm2d(self.expansion * planes))\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(nn.Linear(512 * block.expansion,\n hidden_dim), nn.BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n return out\n\n 
def sub_forward(self, x):\n x = self.extract_feature(x)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n return scores\n\n\nclass MLP_classifier(nn.Module):\n\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(nn.Linear(in_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc2 = nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.\n BatchNorm1d(hidden_dim), nn.ReLU(inplace=True))\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\ndef ResNet18():\n return ResNet(BasicBlock, [2, 2, 2, 2])\n\n\ndef ResNet34(in_plane):\n return ResNet(in_plane, BasicBlock, [3, 4, 6, 3])\n\n\ndef ResNet50(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 6, 3])\n\n\ndef ResNet101(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 23, 3])\n\n\ndef ResNet152(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])\n\n\n<mask token>\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes,\n kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n\n self.conv2 = nn.Conv2d(planes, planes,\n kernel_size=3, stride=1,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n # print(out.shape)\n out = self.bn2(self.conv2(out))\n # print(out.shape)\n out += self.shortcut(x)\n # print(out.shape)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion * planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion * planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion * planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n # print(out.shape)\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, in_plane, block, num_blocks, hidden_dim=512, out_dim=1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.conv1 = nn.Conv2d(in_plane, 64, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.fc1 = nn.Sequential(\n nn.Linear(512 * block.expansion, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.fc2 = nn.Linear(hidden_dim, out_dim)\n self.img_output_dim = None\n self.drop_path_prob = 0.0\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n # print(strides)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n # print(nn.Sequential(*layers))\n return nn.Sequential(*layers)\n\n def extract_feature(self, x):\n 
out = F.relu(self.bn1(self.conv1(x)))\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n # print(out.shape)\n return out\n\n def sub_forward(self, x):\n x = self.extract_feature(x)\n # print(x.shape)\n x = self.fc1(x)\n # print(x.shape)\n x = torch.sigmoid(x)\n # print(x.shape)\n return x\n\n def forward(self, x0, x1):\n x0 = self.sub_forward(x0)\n\n if self.img_output_dim is None:\n self.img_output_dim = x0.shape[1]\n\n x1 = self.sub_forward(x1)\n diff = torch.abs(x0 - x1)\n scores = self.fc2(diff)\n scores = torch.reshape(scores, (-1,))\n # print(scores.shape)\n return scores\n\n\nclass MLP_classifier(nn.Module):\n def __init__(self, in_dim, hidden_dim=512, out_dim=10):\n super(MLP_classifier, self).__init__()\n self.fc1 = nn.Sequential(\n nn.Linear(in_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.fc2 = nn.Sequential(\n nn.Linear(hidden_dim, hidden_dim),\n nn.BatchNorm1d(hidden_dim),\n nn.ReLU(inplace=True)\n )\n self.fc3 = nn.Linear(hidden_dim, out_dim)\n\n def forward(self, x):\n x = x.detach()\n out = self.fc1(x)\n out = self.fc2(out)\n out = self.fc3(out)\n return out\n\n\n\ndef ResNet18():\n return ResNet(BasicBlock, [2, 2, 2, 2])\n\n\ndef ResNet34(in_plane):\n return ResNet(in_plane, BasicBlock, [3, 4, 6, 3])\n\n\ndef ResNet50(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 6, 3])\n\n\ndef ResNet101(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 4, 23, 3])\n\n\ndef ResNet152(in_plane):\n return ResNet(in_plane, Bottleneck, [3, 8, 36, 3])\n\nif __name__ == '__main__':\n device = 'cuda:0' if torch.cuda.is_available() else 'cpu'\n device = torch.device(device)\n x0 = torch.rand(128, 1, 64, 64).to(device)\n net = ResNet34(1).to(device)\n out = net(x0, x0)\n print(out)",
"step-ids": [
13,
15,
18,
22,
25
]
}
|
[
13,
15,
18,
22,
25
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('articles', '0014_auto_20180726_0926')]
operations = [migrations.AlterField(model_name='articles', name=
'cover_url', field=models.URLField(default=
'http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='series', name='cover_url', field=models.URLField(
default=
'http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='specialcolumn', name='cover_url', field=models.URLField
(default=
'http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg'
, max_length=500, verbose_name='封面图'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('articles', '0014_auto_20180726_0926')]
operations = [migrations.AlterField(model_name='articles', name=
'cover_url', field=models.URLField(default=
'http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='series', name='cover_url', field=models.URLField(
default=
'http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg',
max_length=500, verbose_name='封面图')), migrations.AlterField(
model_name='specialcolumn', name='cover_url', field=models.URLField
(default=
'http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg'
, max_length=500, verbose_name='封面图'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-07-26 19:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articles', '0014_auto_20180726_0926'),
]
operations = [
migrations.AlterField(
model_name='articles',
name='cover_url',
field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg', max_length=500, verbose_name='封面图'),
),
migrations.AlterField(
model_name='series',
name='cover_url',
field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg', max_length=500, verbose_name='封面图'),
),
migrations.AlterField(
model_name='specialcolumn',
name='cover_url',
field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg', max_length=500, verbose_name='封面图'),
),
]
|
flexible
|
{
"blob_id": "671a7ee3fabee6ed8dfafe1bddefb1f94322b0e5",
"index": 2477,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articles', '0014_auto_20180726_0926')]\n operations = [migrations.AlterField(model_name='articles', name=\n 'cover_url', field=models.URLField(default=\n 'http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg',\n max_length=500, verbose_name='封面图')), migrations.AlterField(\n model_name='series', name='cover_url', field=models.URLField(\n default=\n 'http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg',\n max_length=500, verbose_name='封面图')), migrations.AlterField(\n model_name='specialcolumn', name='cover_url', field=models.URLField\n (default=\n 'http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg'\n , max_length=500, verbose_name='封面图'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articles', '0014_auto_20180726_0926')]\n operations = [migrations.AlterField(model_name='articles', name=\n 'cover_url', field=models.URLField(default=\n 'http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg',\n max_length=500, verbose_name='封面图')), migrations.AlterField(\n model_name='series', name='cover_url', field=models.URLField(\n default=\n 'http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg',\n max_length=500, verbose_name='封面图')), migrations.AlterField(\n model_name='specialcolumn', name='cover_url', field=models.URLField\n (default=\n 'http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg'\n , max_length=500, verbose_name='封面图'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.12 on 2018-07-26 19:11\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articles', '0014_auto_20180726_0926'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='articles',\n name='cover_url',\n field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/articles/article_01.jpg', max_length=500, verbose_name='封面图'),\n ),\n migrations.AlterField(\n model_name='series',\n name='cover_url',\n field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/series/series_01.jpg', max_length=500, verbose_name='封面图'),\n ),\n migrations.AlterField(\n model_name='specialcolumn',\n name='cover_url',\n field=models.URLField(default='http://pcgsvdl00.bkt.clouddn.com/default/specialColumn/special_01.jpg', max_length=500, verbose_name='封面图'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
RABBITMQ_IP = '172.23.105.82'
OBJECT_CACHE_IP = '172.23.105.69'
OBJECT_CACHE_PORT = '11911'
SERIESLY_IP = ''
COUCHBASE_IP = '172.23.105.54'
COUCHBASE_PORT = '8091'
COUCHBASE_USER = 'Administrator'
COUCHBASE_PWD = 'password'
SSH_USER = 'root'
SSH_PASSWORD = 'password'
WORKERS = ['127.0.0.1']
WORKER_CONFIGS = ['all']
CB_CLUSTER_TAG = 'default'
CLUSTER_IPS = ['172.23.105.54', '172.23.105.57', '172.23.105.62',
'172.23.105.55']
<|reserved_special_token_0|>
REMOTE_SITES = {'remote1': {'RABBITMQ_IP': '172.23.105.99',
'CB_CLUSTER_TAG': 'default', 'COUCHBASE_IP': '172.23.105.58',
'COUCHBASE_PORT': '8091'}}
LOGDIR = 'logs'
ENABLE_BACKUPS = False
BACKUP_DIR = '/tmp/backup'
BACKUP_NODE_IP = '127.0.0.1'
BACKUP_NODE_SSH_USER = 'root'
BACKUP_NODE_SSH_PWD = 'password'
<|reserved_special_token_1|>
#TODO: allow workers to pull this from cache
RABBITMQ_IP = '172.23.105.82'
OBJECT_CACHE_IP = "172.23.105.69"
OBJECT_CACHE_PORT = "11911"
SERIESLY_IP = ''
COUCHBASE_IP = '172.23.105.54'
COUCHBASE_PORT = '8091'
COUCHBASE_USER = "Administrator"
COUCHBASE_PWD = "password"
SSH_USER = "root"
SSH_PASSWORD = "password"
WORKERS = ['127.0.0.1']
WORKER_CONFIGS = ["all"]
CB_CLUSTER_TAG = "default"
CLUSTER_IPS = ["172.23.105.54", "172.23.105.57", "172.23.105.62", "172.23.105.55"]
# xdcr config
"""
" pointer information to remote sites
" remote1 = name for remote site
" RABBITMQ_IP = broker managing remote site (can be same as local broker if using different vhosts)
" this should equal RABBITMQ_IP of remote site
" CB_CLUSTER_TAG = represents vhost watched by workers remote site.
" this should equal CB_CLUSTER_TAG of remote site
" COUCHBASE_IP/PORT = IP/PORT of a couchbase node in remote site
"""
REMOTE_SITES = {"remote1" : {"RABBITMQ_IP" : "172.23.105.99",
"CB_CLUSTER_TAG" : "default",
"COUCHBASE_IP" : "172.23.105.58",
"COUCHBASE_PORT" : "8091"}}
LOGDIR="logs" # relative to current dir
#Backup Config
ENABLE_BACKUPS = False
BACKUP_DIR = "/tmp/backup"
BACKUP_NODE_IP = "127.0.0.1"
BACKUP_NODE_SSH_USER = "root"
BACKUP_NODE_SSH_PWD = "password"
|
flexible
|
{
"blob_id": "e70ebd9bb9cd7027772ec117cb91349afba7ab10",
"index": 6390,
"step-1": "<mask token>\n",
"step-2": "RABBITMQ_IP = '172.23.105.82'\nOBJECT_CACHE_IP = '172.23.105.69'\nOBJECT_CACHE_PORT = '11911'\nSERIESLY_IP = ''\nCOUCHBASE_IP = '172.23.105.54'\nCOUCHBASE_PORT = '8091'\nCOUCHBASE_USER = 'Administrator'\nCOUCHBASE_PWD = 'password'\nSSH_USER = 'root'\nSSH_PASSWORD = 'password'\nWORKERS = ['127.0.0.1']\nWORKER_CONFIGS = ['all']\nCB_CLUSTER_TAG = 'default'\nCLUSTER_IPS = ['172.23.105.54', '172.23.105.57', '172.23.105.62',\n '172.23.105.55']\n<mask token>\nREMOTE_SITES = {'remote1': {'RABBITMQ_IP': '172.23.105.99',\n 'CB_CLUSTER_TAG': 'default', 'COUCHBASE_IP': '172.23.105.58',\n 'COUCHBASE_PORT': '8091'}}\nLOGDIR = 'logs'\nENABLE_BACKUPS = False\nBACKUP_DIR = '/tmp/backup'\nBACKUP_NODE_IP = '127.0.0.1'\nBACKUP_NODE_SSH_USER = 'root'\nBACKUP_NODE_SSH_PWD = 'password'\n",
"step-3": "#TODO: allow workers to pull this from cache\n\nRABBITMQ_IP = '172.23.105.82'\nOBJECT_CACHE_IP = \"172.23.105.69\"\nOBJECT_CACHE_PORT = \"11911\"\nSERIESLY_IP = ''\nCOUCHBASE_IP = '172.23.105.54'\nCOUCHBASE_PORT = '8091'\nCOUCHBASE_USER = \"Administrator\"\nCOUCHBASE_PWD = \"password\"\nSSH_USER = \"root\"\nSSH_PASSWORD = \"password\"\nWORKERS = ['127.0.0.1']\nWORKER_CONFIGS = [\"all\"]\nCB_CLUSTER_TAG = \"default\"\n\nCLUSTER_IPS = [\"172.23.105.54\", \"172.23.105.57\", \"172.23.105.62\", \"172.23.105.55\"]\n\n# xdcr config\n\"\"\"\n\" pointer information to remote sites\n\" remote1 = name for remote site\n\" RABBITMQ_IP = broker managing remote site (can be same as local broker if using different vhosts)\n\" this should equal RABBITMQ_IP of remote site\n\" CB_CLUSTER_TAG = represents vhost watched by workers remote site.\n\" this should equal CB_CLUSTER_TAG of remote site\n\" COUCHBASE_IP/PORT = IP/PORT of a couchbase node in remote site\n\"\"\"\nREMOTE_SITES = {\"remote1\" : {\"RABBITMQ_IP\" : \"172.23.105.99\",\n \"CB_CLUSTER_TAG\" : \"default\",\n \"COUCHBASE_IP\" : \"172.23.105.58\",\n \"COUCHBASE_PORT\" : \"8091\"}}\n\nLOGDIR=\"logs\" # relative to current dir\n\n\n#Backup Config\nENABLE_BACKUPS = False\nBACKUP_DIR = \"/tmp/backup\"\nBACKUP_NODE_IP = \"127.0.0.1\"\nBACKUP_NODE_SSH_USER = \"root\"\nBACKUP_NODE_SSH_PWD = \"password\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class HTTPMethodView:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def dispatch_request(self, request, *args, **kwargs):
handler = getattr(self, request.method.lower(), None)
if handler:
return handler(request, *args, **kwargs)
raise InvalidUsage('Method {} not allowed for URL {}'.format(
request.method, request.url), status_code=405)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HTTPMethodView:
<|reserved_special_token_0|>
decorators = []
def dispatch_request(self, request, *args, **kwargs):
handler = getattr(self, request.method.lower(), None)
if handler:
return handler(request, *args, **kwargs)
raise InvalidUsage('Method {} not allowed for URL {}'.format(
request.method, request.url), status_code=405)
@classmethod
def as_view(cls, *class_args, **class_kwargs):
""" Converts the class into an actual view function that can be used
with the routing system.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
view.view_class = cls
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
return view
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HTTPMethodView:
""" Simple class based implementation of view for the sanic.
You should implement methods (get, post, put, patch, delete) for the class
to every HTTP method you want to support.
For example:
class DummyView(HTTPMethodView):
def get(self, request, *args, **kwargs):
return text('I am get method')
def put(self, request, *args, **kwargs):
return text('I am put method')
etc.
If someone tries to use a non-implemented method, there will be a
405 response.
If you need any url params just mention them in method definition:
class DummyView(HTTPMethodView):
def get(self, request, my_param_here, *args, **kwargs):
return text('I am get method with %s' % my_param_here)
To add the view into the routing you could use
1) app.add_route(DummyView.as_view(), '/')
2) app.route('/')(DummyView.as_view())
To add any decorator you could set it into decorators variable
"""
decorators = []
def dispatch_request(self, request, *args, **kwargs):
handler = getattr(self, request.method.lower(), None)
if handler:
return handler(request, *args, **kwargs)
raise InvalidUsage('Method {} not allowed for URL {}'.format(
request.method, request.url), status_code=405)
@classmethod
def as_view(cls, *class_args, **class_kwargs):
""" Converts the class into an actual view function that can be used
with the routing system.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
view.view_class = cls
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
return view
<|reserved_special_token_1|>
from .exceptions import InvalidUsage
class HTTPMethodView:
""" Simple class based implementation of view for the sanic.
You should implement methods (get, post, put, patch, delete) for the class
to every HTTP method you want to support.
For example:
class DummyView(HTTPMethodView):
def get(self, request, *args, **kwargs):
return text('I am get method')
def put(self, request, *args, **kwargs):
return text('I am put method')
etc.
If someone tries to use a non-implemented method, there will be a
405 response.
If you need any url params just mention them in method definition:
class DummyView(HTTPMethodView):
def get(self, request, my_param_here, *args, **kwargs):
return text('I am get method with %s' % my_param_here)
To add the view into the routing you could use
1) app.add_route(DummyView.as_view(), '/')
2) app.route('/')(DummyView.as_view())
To add any decorator you could set it into decorators variable
"""
decorators = []
def dispatch_request(self, request, *args, **kwargs):
handler = getattr(self, request.method.lower(), None)
if handler:
return handler(request, *args, **kwargs)
raise InvalidUsage('Method {} not allowed for URL {}'.format(
request.method, request.url), status_code=405)
@classmethod
def as_view(cls, *class_args, **class_kwargs):
""" Converts the class into an actual view function that can be used
with the routing system.
"""
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
view.view_class = cls
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
return view
|
flexible
|
{
"blob_id": "4948fd2062bdbd32bfa32d2b0e24587f0872132d",
"index": 4686,
"step-1": "<mask token>\n\n\nclass HTTPMethodView:\n <mask token>\n <mask token>\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass HTTPMethodView:\n <mask token>\n decorators = []\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\" Converts the class into an actual view function that can be used\n with the routing system.\n\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view\n",
"step-3": "<mask token>\n\n\nclass HTTPMethodView:\n \"\"\" Simple class based implementation of view for the sanic.\n You should implement methods (get, post, put, patch, delete) for the class\n to every HTTP method you want to support.\n\n For example:\n class DummyView(HTTPMethodView):\n\n def get(self, request, *args, **kwargs):\n return text('I am get method')\n\n def put(self, request, *args, **kwargs):\n return text('I am put method')\n etc.\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n\n If you need any url params just mention them in method definition:\n class DummyView(HTTPMethodView):\n\n def get(self, request, my_param_here, *args, **kwargs):\n return text('I am get method with %s' % my_param_here)\n\n To add the view into the routing you could use\n 1) app.add_route(DummyView.as_view(), '/')\n 2) app.route('/')(DummyView.as_view())\n\n To add any decorator you could set it into decorators variable\n \"\"\"\n decorators = []\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\" Converts the class into an actual view function that can be used\n with the routing system.\n\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view\n",
"step-4": "from .exceptions import InvalidUsage\n\n\nclass HTTPMethodView:\n \"\"\" Simple class based implementation of view for the sanic.\n You should implement methods (get, post, put, patch, delete) for the class\n to every HTTP method you want to support.\n\n For example:\n class DummyView(HTTPMethodView):\n\n def get(self, request, *args, **kwargs):\n return text('I am get method')\n\n def put(self, request, *args, **kwargs):\n return text('I am put method')\n etc.\n\n If someone tries to use a non-implemented method, there will be a\n 405 response.\n\n If you need any url params just mention them in method definition:\n class DummyView(HTTPMethodView):\n\n def get(self, request, my_param_here, *args, **kwargs):\n return text('I am get method with %s' % my_param_here)\n\n To add the view into the routing you could use\n 1) app.add_route(DummyView.as_view(), '/')\n 2) app.route('/')(DummyView.as_view())\n\n To add any decorator you could set it into decorators variable\n \"\"\"\n decorators = []\n\n def dispatch_request(self, request, *args, **kwargs):\n handler = getattr(self, request.method.lower(), None)\n if handler:\n return handler(request, *args, **kwargs)\n raise InvalidUsage('Method {} not allowed for URL {}'.format(\n request.method, request.url), status_code=405)\n\n @classmethod\n def as_view(cls, *class_args, **class_kwargs):\n \"\"\" Converts the class into an actual view function that can be used\n with the routing system.\n\n \"\"\"\n\n def view(*args, **kwargs):\n self = view.view_class(*class_args, **class_kwargs)\n return self.dispatch_request(*args, **kwargs)\n if cls.decorators:\n view.__module__ = cls.__module__\n for decorator in cls.decorators:\n view = decorator(view)\n view.view_class = cls\n view.__doc__ = cls.__doc__\n view.__module__ = cls.__module__\n return view\n",
"step-5": null,
"step-ids": [
2,
4,
5,
6
]
}
|
[
2,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def fonct(valeur, a=None):
if type(a) is list:
a.append(valeur)
elif type(a) is tuple:
a += tuple((valeur,))
elif type(a) is str:
a += str(valeur)
elif type(a) is set:
a.add(valeur)
else:
a += valeur
return a
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def fonct(valeur, a=None):
if type(a) is list:
a.append(valeur)
elif type(a) is tuple:
a += tuple((valeur,))
elif type(a) is str:
a += str(valeur)
elif type(a) is set:
a.add(valeur)
else:
a += valeur
return a
print(fonct(4, [1, 2, 3]))
print(fonct(4, 'eg'))
print(fonct(4, (1, 2, 3)))
print(fonct(4, {1, 2, 3}))
<|reserved_special_token_1|>
def fonct(valeur, a= None):
if type(a) is list:
a.append(valeur)
# a+= valeur
elif type(a) is tuple:
a += tuple((valeur,))
elif type(a) is str:
a += str(valeur)
elif type(a) is set:
a.add(valeur)
else:
a+= valeur
return(a)
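# Note: calling fonct(valeur) with the default a=None falls into the else branch and raises TypeError, because None does not support +=.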
print(fonct(4, [1, 2, 3])) # [1, 2, 3, 4]
print(fonct(4, 'eg' )) # eg4
print(fonct(4, (1,2,3))) # (1, 2, 3, 4)
print(fonct(4, {1, 2, 3})) # {1, 2, 3, 4}
|
flexible
|
{
"blob_id": "2a13fffa105a5dd546c30c892e59888eb6ead996",
"index": 4645,
"step-1": "<mask token>\n",
"step-2": "def fonct(valeur, a=None):\n if type(a) is list:\n a.append(valeur)\n elif type(a) is tuple:\n a += tuple((valeur,))\n elif type(a) is str:\n a += str(valeur)\n elif type(a) is set:\n a.add(valeur)\n else:\n a += valeur\n return a\n\n\n<mask token>\n",
"step-3": "def fonct(valeur, a=None):\n if type(a) is list:\n a.append(valeur)\n elif type(a) is tuple:\n a += tuple((valeur,))\n elif type(a) is str:\n a += str(valeur)\n elif type(a) is set:\n a.add(valeur)\n else:\n a += valeur\n return a\n\n\nprint(fonct(4, [1, 2, 3]))\nprint(fonct(4, 'eg'))\nprint(fonct(4, (1, 2, 3)))\nprint(fonct(4, {1, 2, 3}))\n",
"step-4": "def fonct(valeur, a= None):\n if type(a) is list:\n a.append(valeur)\n # a+= valeur\n elif type(a) is tuple: \n a += tuple((valeur,)) \n elif type(a) is str: \n a += str(valeur) \n elif type(a) is set: \n a.add(valeur) \n else:\n a+= valeur\n return(a)\n\nprint(fonct(4, [1, 2, 3])) # [1, 2, 3, 4]\nprint(fonct(4, 'eg' )) # eg4\nprint(fonct(4, (1,2,3))) # (1, 2, 3, 4)\nprint(fonct(4, {1, 2, 3})) # (1, 2, 3, 4)\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if current_abpath[-12:] == 'library.zip/':
current_abpath = current_abpath[:-12]
<|reserved_special_token_0|>
def get_item_colors():
"""
>>> get_item_colors()
"""
result = []
if not PICKITEMSP:
return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS['legendary']
return result
<|reserved_special_token_1|>
<|reserved_special_token_0|>
PICKITEMSP = True
RAREP = True
REPAIRP = False
ITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}
current_abpath = abspath(dirname(__file__)) + '/'
if current_abpath[-12:] == 'library.zip/':
current_abpath = current_abpath[:-12]
imgs_dir = current_abpath + 'imgs\\'
def get_item_colors():
"""
>>> get_item_colors()
"""
result = []
if not PICKITEMSP:
return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS['legendary']
return result
<|reserved_special_token_1|>
from os.path import dirname, abspath
PICKITEMSP = True
RAREP = True
REPAIRP = False
ITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}
current_abpath = abspath(dirname(__file__)) + '/'
if current_abpath[-12:] == 'library.zip/':
current_abpath = current_abpath[:-12]
imgs_dir = current_abpath + 'imgs\\'
def get_item_colors():
"""
>>> get_item_colors()
"""
result = []
if not PICKITEMSP:
return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS['legendary']
return result
<|reserved_special_token_1|>
#!/usr/bin/python
# coding: utf-8
from os.path import dirname, abspath
PICKITEMSP = True
RAREP = True
REPAIRP = False
ITEMS = {
"legendary": ["#02CE01", # set
"#BF642F"], # legndary
"rare": ["#BBBB00"]
}
current_abpath = abspath(dirname(__file__)) + "/"
# With py2exe the dirname is INSTPATH/server/library.zip. So
# current_abpath will be INSTPATH/server/library.zip/
if current_abpath[-12:] == "library.zip/":
current_abpath = current_abpath[:-12]
imgs_dir = current_abpath + "imgs\\"
def get_item_colors():
'''
>>> get_item_colors()
'''
result = []
if not PICKITEMSP: return result
if RAREP:
for a in ITEMS:
result += ITEMS[a]
return result
else:
result = ITEMS["legendary"]
return result
|
flexible
|
{
"blob_id": "927b42326ad62f5e484fd7016c42a44b93609f83",
"index": 1296,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\n<mask token>\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n",
"step-3": "<mask token>\nPICKITEMSP = True\nRAREP = True\nREPAIRP = False\nITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}\ncurrent_abpath = abspath(dirname(__file__)) + '/'\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\nimgs_dir = current_abpath + 'imgs\\\\'\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n",
"step-4": "from os.path import dirname, abspath\nPICKITEMSP = True\nRAREP = True\nREPAIRP = False\nITEMS = {'legendary': ['#02CE01', '#BF642F'], 'rare': ['#BBBB00']}\ncurrent_abpath = abspath(dirname(__file__)) + '/'\nif current_abpath[-12:] == 'library.zip/':\n current_abpath = current_abpath[:-12]\nimgs_dir = current_abpath + 'imgs\\\\'\n\n\ndef get_item_colors():\n \"\"\"\n >>> get_item_colors()\n \"\"\"\n result = []\n if not PICKITEMSP:\n return result\n if RAREP:\n for a in ITEMS:\n result += ITEMS[a]\n return result\n else:\n result = ITEMS['legendary']\n return result\n",
"step-5": "#!/usr/bin/python\r\n# coding: utf-8\r\n\r\nfrom os.path import dirname, abspath\r\n\r\nPICKITEMSP = True\r\nRAREP\t = True\r\nREPAIRP = False\r\n\r\nITEMS = {\r\n \"legendary\": [\"#02CE01\", # set\r\n \"#BF642F\"], # legndary\r\n \"rare\":\t [\"#BBBB00\"]\r\n }\r\n\r\ncurrent_abpath = abspath(dirname(__file__)) + \"/\"\r\n# With py2exe the dirname is INSTPATH/server/library.zip. So\r\n# current_abpath will be INSTPATH/server/library.zip/\r\nif current_abpath[-12:] == \"library.zip/\":\r\n current_abpath = current_abpath[:-12]\r\n\r\nimgs_dir = current_abpath + \"imgs\\\\\"\r\n\r\n\r\ndef get_item_colors():\r\n '''\r\n >>> get_item_colors()\r\n '''\r\n result = []\r\n if not PICKITEMSP: return result\r\n \r\n if RAREP:\r\n for a in ITEMS:\r\n result += ITEMS[a]\r\n return result\r\n else:\r\n result = ITEMS[\"legendary\"]\r\n return result\r\n \r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if search_match_object:
print(search_match_object.span())
print(search_match_object.start())
print(search_match_object.end())
print(search_match_object.group())
print(search_pattern.findall(text))
print(search_pattern.fullmatch('nuts'))
print(search_pattern.match('nuts...'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
text = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'
search_pattern = re.compile('nuts')
search_match_object = search_pattern.search(text)
if search_match_object:
print(search_match_object.span())
print(search_match_object.start())
print(search_match_object.end())
print(search_match_object.group())
print(search_pattern.findall(text))
print(search_pattern.fullmatch('nuts'))
print(search_pattern.match('nuts...'))
<|reserved_special_token_1|>
import re
text = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'
search_pattern = re.compile('nuts')
search_match_object = search_pattern.search(text)
if search_match_object:
print(search_match_object.span())
print(search_match_object.start())
print(search_match_object.end())
print(search_match_object.group())
print(search_pattern.findall(text))
print(search_pattern.fullmatch('nuts'))
print(search_pattern.match('nuts...'))
<|reserved_special_token_1|>
import re
text = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'
search_pattern = re.compile('nuts')
search_match_object = search_pattern.search(text)
if search_match_object:
print(search_match_object.span())
print(search_match_object.start())
print(search_match_object.end())
print(search_match_object.group())
# Other methods of pattern
print(search_pattern.findall(text))
print(search_pattern.fullmatch('nuts')) # The entire string must match
print(search_pattern.match('nuts...')) # Start of the string must match
|
flexible
|
{
"blob_id": "ef5d235f09eea827b240290218c397f880f1046d",
"index": 4433,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n",
"step-3": "<mask token>\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n",
"step-4": "import re\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts'))\nprint(search_pattern.match('nuts...'))\n",
"step-5": "import re\n\ntext = 'Macademia nuts, Honey tuile, Cocoa powder, Pistachio nuts'\nsearch_pattern = re.compile('nuts')\nsearch_match_object = search_pattern.search(text)\n\nif search_match_object:\n print(search_match_object.span())\n print(search_match_object.start())\n print(search_match_object.end())\n print(search_match_object.group())\n\n# Other methods of pattern\nprint(search_pattern.findall(text))\nprint(search_pattern.fullmatch('nuts')) # The entire string must match\nprint(search_pattern.match('nuts...')) # Start of the string must match\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
default_app_config = "assistant.additionalpage.apps.AdditionalPageAppConfig"
|
normal
|
{
"blob_id": "0e2c71ab4f194af3c2ee65c2cbd6f36921eb587e",
"index": 2079,
"step-1": "<mask token>\n",
"step-2": "default_app_config = 'assistant.additionalpage.apps.AdditionalPageAppConfig'\n",
"step-3": "default_app_config = \"assistant.additionalpage.apps.AdditionalPageAppConfig\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for _ in range(int(input())):
b.append(A > set(map(int, input().split())))
print(all(b))
<|reserved_special_token_1|>
A = set(map(int, input().split()))
b = []
for _ in range(int(input())):
b.append(A > set(map(int, input().split())))
print(all(b))
<|reserved_special_token_1|>
#!/usr/bin/env python3
# given a set A and n other sets.
# find whether set A is a strict superset of each of the n sets
# print True if yes, otherwise False
A = set(map(int, input().split()))
b = []
for _ in range(int(input())):
b.append(A > set(map(int, input().split())))
print(all(b))
|
flexible
|
{
"blob_id": "a9eb2b3f26396918c792de3f126e51bde334b709",
"index": 7777,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\nprint(all(b))\n",
"step-3": "A = set(map(int, input().split()))\nb = []\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\nprint(all(b))\n",
"step-4": "#!/usr/bin/env python3\n\n# given a set A and n other sets.\n# find whether set A is a strict superset of each of the n sets\n# print True if yes, otherwise False\n\nA = set(map(int, input().split()))\nb = []\nfor _ in range(int(input())):\n b.append(A > set(map(int, input().split())))\n\nprint(all(b))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 13:42:09 2019
@author: Administrator
"""
from config.path_config import *
import GV
def ReadTxtName(rootdir):
    #read each line of the file and collect the lines into a list
lines = []
with open(rootdir, 'r') as file_to_read:
while True:
line = file_to_read.readline()
if not line:
break
line = line.strip('\n')
lines.append(line)
return lines
def project_query_lz_main(question):
    #check whether the question matches any known project name
txt_line = ReadTxtName(PROJECT_NAMES)
for project_name in txt_line:
if project_name in question:
#print('我们觉得您是想查' + project_name + '项目的信息')
GV.SHOW = True
return ('我们觉得您是想查' + project_name +
'项目的信息,但是我们还没有记录项目详细信息')
GV.FLAG = 3
GV.SHOW = False
#state = False
    #print('not related to any project; fall through to the other modules')
return question
#project_query_lz_main('工银天梭项目进度怎么样了',2)
|
normal
|
{
"blob_id": "92bbccfbfebf905965c9cb0f1a85ffaa7d0cf6b5",
"index": 3796,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-3": "<mask token>\n\n\ndef ReadTxtName(rootdir):\n lines = []\n with open(rootdir, 'r') as file_to_read:\n while True:\n line = file_to_read.readline()\n if not line:\n break\n line = line.strip('\\n')\n lines.append(line)\n return lines\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-4": "<mask token>\nfrom config.path_config import *\nimport GV\n\n\ndef ReadTxtName(rootdir):\n lines = []\n with open(rootdir, 'r') as file_to_read:\n while True:\n line = file_to_read.readline()\n if not line:\n break\n line = line.strip('\\n')\n lines.append(line)\n return lines\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 19 13:42:09 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom config.path_config import *\r\nimport GV\r\n \r\ndef ReadTxtName(rootdir):\r\n #读取文件中的每一行,转为list\r\n lines = []\r\n with open(rootdir, 'r') as file_to_read:\r\n while True:\r\n line = file_to_read.readline()\r\n if not line:\r\n break\r\n line = line.strip('\\n')\r\n lines.append(line)\r\n return lines\r\n\r\ndef project_query_lz_main(question):\r\n #找语句中是否匹配到了项目名称\r\n txt_line = ReadTxtName(PROJECT_NAMES) \r\n for project_name in txt_line:\r\n if project_name in question:\r\n #print('我们觉得您是想查' + project_name + '项目的信息')\r\n GV.SHOW = True\r\n return ('我们觉得您是想查' + project_name + \r\n '项目的信息,但是我们还没有记录项目详细信息')\r\n GV.FLAG = 3\r\n GV.SHOW = False\r\n #state = False\r\n #print('与项目无关,此处跳出,接其他模块')\r\n return question\r\n\r\n#project_query_lz_main('工银天梭项目进度怎么样了',2)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import unittest
from datetime import datetime
from models import *
class Test_PlaceModel(unittest.TestCase):
"""
Test the place model class
"""
def setUp(self):
self.model = Place()
self.model.save()
def test_var_initialization(self):
self.assertTrue(hasattr(self.model, "city_id"))
self.assertTrue(hasattr(self.model, "user_id"))
self.assertTrue(hasattr(self.model, "name"))
self.assertTrue(hasattr(self.model, "description"))
self.assertTrue(hasattr(self.model, "number_rooms"))
self.assertTrue(hasattr(self.model, "number_bathrooms"))
self.assertTrue(hasattr(self.model, "max_guest"))
self.assertTrue(hasattr(self.model, "price_by_night"))
self.assertTrue(hasattr(self.model, "latitude"))
self.assertTrue(hasattr(self.model, "longitude"))
self.assertTrue(hasattr(self.model, "amenities"))
self.assertEqual(self.model.city_id, "")
self.assertEqual(self.model.user_id, "")
self.assertEqual(self.model.name, "")
self.assertEqual(self.model.description, "")
self.assertEqual(self.model.number_rooms, 0)
self.assertEqual(self.model.number_bathrooms, 0)
self.assertEqual(self.model.max_guest, 0)
self.assertEqual(self.model.price_by_night, 0)
self.assertEqual(self.model.latitude, 0.0)
self.assertEqual(self.model.longitude, 0.0)
self.assertEqual(self.model.amenities, [''])
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "c7881c0d06600a43bdc01f5e464127c596db6713",
"index": 7993,
"step-1": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom datetime import datetime\nfrom models import *\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, 'city_id'))\n self.assertTrue(hasattr(self.model, 'user_id'))\n self.assertTrue(hasattr(self.model, 'name'))\n self.assertTrue(hasattr(self.model, 'description'))\n self.assertTrue(hasattr(self.model, 'number_rooms'))\n self.assertTrue(hasattr(self.model, 'number_bathrooms'))\n self.assertTrue(hasattr(self.model, 'max_guest'))\n self.assertTrue(hasattr(self.model, 'price_by_night'))\n self.assertTrue(hasattr(self.model, 'latitude'))\n self.assertTrue(hasattr(self.model, 'longitude'))\n self.assertTrue(hasattr(self.model, 'amenities'))\n self.assertEqual(self.model.city_id, '')\n self.assertEqual(self.model.user_id, '')\n self.assertEqual(self.model.name, '')\n self.assertEqual(self.model.description, '')\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom datetime import datetime\nfrom models import *\n\n\nclass Test_PlaceModel(unittest.TestCase):\n \"\"\"\n Test the place model class\n \"\"\"\n\n def setUp(self):\n self.model = Place()\n self.model.save()\n\n def test_var_initialization(self):\n self.assertTrue(hasattr(self.model, \"city_id\"))\n self.assertTrue(hasattr(self.model, \"user_id\"))\n self.assertTrue(hasattr(self.model, \"name\"))\n self.assertTrue(hasattr(self.model, \"description\"))\n self.assertTrue(hasattr(self.model, \"number_rooms\"))\n self.assertTrue(hasattr(self.model, \"number_bathrooms\"))\n self.assertTrue(hasattr(self.model, \"max_guest\"))\n self.assertTrue(hasattr(self.model, \"price_by_night\"))\n self.assertTrue(hasattr(self.model, \"latitude\"))\n self.assertTrue(hasattr(self.model, \"longitude\"))\n self.assertTrue(hasattr(self.model, \"amenities\"))\n self.assertEqual(self.model.city_id, \"\")\n self.assertEqual(self.model.user_id, \"\")\n self.assertEqual(self.model.name, \"\")\n self.assertEqual(self.model.description, \"\")\n self.assertEqual(self.model.number_rooms, 0)\n self.assertEqual(self.model.number_bathrooms, 0)\n self.assertEqual(self.model.max_guest, 0)\n self.assertEqual(self.model.price_by_night, 0)\n self.assertEqual(self.model.latitude, 0.0)\n self.assertEqual(self.model.longitude, 0.0)\n self.assertEqual(self.model.amenities, [''])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def thresholding(img):
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lowerWhite = np.array([80, 0, 0])
upperWhite = np.array([255, 160, 255])
maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)
return maskWhite
<|reserved_special_token_0|>
def nothing(a):
pass
<|reserved_special_token_0|>
def valTrackbars(wT=480, hT=240):
widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')
heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')
widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')
heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')
points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),
(widthBottom, heightBottom), (wT - widthBottom, heightBottom)])
return points
def drawPoints(img, points):
for x in range(0, 4):
cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0,
255), cv2.FILLED)
return img
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def thresholding(img):
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lowerWhite = np.array([80, 0, 0])
upperWhite = np.array([255, 160, 255])
maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)
return maskWhite
def warpImg(img, points, w, h, inv=False):
pts1 = np.float32(points)
pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
if inv:
matrix = cv2.getPerspectiveTransform(pts2, pts1)
else:
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgWarp = cv2.warpPerspective(img, matrix, (w, h))
return imgWarp
def nothing(a):
pass
<|reserved_special_token_0|>
def valTrackbars(wT=480, hT=240):
widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')
heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')
widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')
heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')
points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),
(widthBottom, heightBottom), (wT - widthBottom, heightBottom)])
return points
def drawPoints(img, points):
for x in range(0, 4):
cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0,
255), cv2.FILLED)
return img
def getHistogram(img, minPer=0.1, display=False, region=1):
if region == 1:
histValues = np.sum(img, axis=0)
else:
histValues = np.sum(img[img.shape[0] // region:, :], axis=0)
maxValue = np.max(histValues)
minValue = minPer * maxValue
indexArray = np.where(histValues >= minValue)
basePoint = int(np.average(indexArray))
if display:
imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
for x, intensity in enumerate(histValues):
cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] -
intensity // 255 // region), (255, 0, 255), 1)
cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255
), cv2.FILLED)
return basePoint, imgHist
return basePoint
def stackImages(scale, imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range(0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),
None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0
][0].shape[1], imgArray[0][0].shape[0]), None,
scale, scale)
if len(imgArray[x][y].shape) == 2:
imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.
COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank] * rows
hor_con = [imageBlank] * rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,
scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],
imgArray[0].shape[0]), None, scale, scale)
if len(imgArray[x].shape) == 2:
imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(imgArray)
ver = hor
return ver
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def thresholding(img):
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lowerWhite = np.array([80, 0, 0])
upperWhite = np.array([255, 160, 255])
maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)
return maskWhite
def warpImg(img, points, w, h, inv=False):
pts1 = np.float32(points)
pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
if inv:
matrix = cv2.getPerspectiveTransform(pts2, pts1)
else:
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgWarp = cv2.warpPerspective(img, matrix, (w, h))
return imgWarp
def nothing(a):
pass
def initializeTrackbars(initialTrackbarVals, wT=480, hT=240):
cv2.namedWindow('Trackbars')
cv2.resizeWindow('Trackbars', 360, 240)
cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //
2, nothing)
cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],
hT, nothing)
cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],
wT // 2, nothing)
cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],
hT, nothing)
def valTrackbars(wT=480, hT=240):
widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')
heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')
widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')
heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')
points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),
(widthBottom, heightBottom), (wT - widthBottom, heightBottom)])
return points
def drawPoints(img, points):
for x in range(0, 4):
cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0,
255), cv2.FILLED)
return img
def getHistogram(img, minPer=0.1, display=False, region=1):
if region == 1:
histValues = np.sum(img, axis=0)
else:
histValues = np.sum(img[img.shape[0] // region:, :], axis=0)
maxValue = np.max(histValues)
minValue = minPer * maxValue
indexArray = np.where(histValues >= minValue)
basePoint = int(np.average(indexArray))
if display:
imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
for x, intensity in enumerate(histValues):
cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] -
intensity // 255 // region), (255, 0, 255), 1)
cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255
), cv2.FILLED)
return basePoint, imgHist
return basePoint
def stackImages(scale, imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range(0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),
None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0
][0].shape[1], imgArray[0][0].shape[0]), None,
scale, scale)
if len(imgArray[x][y].shape) == 2:
imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.
COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank] * rows
hor_con = [imageBlank] * rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,
scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],
imgArray[0].shape[0]), None, scale, scale)
if len(imgArray[x].shape) == 2:
imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(imgArray)
ver = hor
return ver
<|reserved_special_token_1|>
import cv2
import numpy as np
def thresholding(img):
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lowerWhite = np.array([80, 0, 0])
upperWhite = np.array([255, 160, 255])
maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)
return maskWhite
def warpImg(img, points, w, h, inv=False):
pts1 = np.float32(points)
pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
if inv:
matrix = cv2.getPerspectiveTransform(pts2, pts1)
else:
matrix = cv2.getPerspectiveTransform(pts1, pts2)
imgWarp = cv2.warpPerspective(img, matrix, (w, h))
return imgWarp
def nothing(a):
pass
def initializeTrackbars(initialTrackbarVals, wT=480, hT=240):
cv2.namedWindow('Trackbars')
cv2.resizeWindow('Trackbars', 360, 240)
cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //
2, nothing)
cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],
hT, nothing)
cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],
wT // 2, nothing)
cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],
hT, nothing)
def valTrackbars(wT=480, hT=240):
widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')
heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')
widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')
heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')
points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),
(widthBottom, heightBottom), (wT - widthBottom, heightBottom)])
return points
def drawPoints(img, points):
for x in range(0, 4):
cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0,
255), cv2.FILLED)
return img
def getHistogram(img, minPer=0.1, display=False, region=1):
if region == 1:
histValues = np.sum(img, axis=0)
else:
histValues = np.sum(img[img.shape[0] // region:, :], axis=0)
maxValue = np.max(histValues)
minValue = minPer * maxValue
indexArray = np.where(histValues >= minValue)
basePoint = int(np.average(indexArray))
if display:
imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
for x, intensity in enumerate(histValues):
cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] -
intensity // 255 // region), (255, 0, 255), 1)
cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255
), cv2.FILLED)
return basePoint, imgHist
return basePoint
def stackImages(scale, imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range(0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),
None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0
][0].shape[1], imgArray[0][0].shape[0]), None,
scale, scale)
if len(imgArray[x][y].shape) == 2:
imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.
COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank] * rows
hor_con = [imageBlank] * rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,
scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],
imgArray[0].shape[0]), None, scale, scale)
if len(imgArray[x].shape) == 2:
imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor = np.hstack(imgArray)
ver = hor
return ver
<|reserved_special_token_1|>
import cv2
import numpy as np
# THRESHOLDING FUNCTION IMPLEMENTATION
def thresholding(img):
# visualizing image in HSV parameters
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# the values for lowerWhite and upperWhite are found by tweaking the HSV min/max params in the
# trackbar by running ColorPickerScript.py
lowerWhite = np.array([80, 0, 0])
upperWhite = np.array([255, 160, 255])
# passing the values of lowerWhite and upperWhite to create the mask
maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)
return maskWhite
# WARPING FUNCTION IMPLEMENTATION
def warpImg (img, points, w, h, inv=False):
pts1 = np.float32(points)
# defining the border coordinates of the warped image
pts2 = np.float32([[0,0], [w,0], [0,h], [w,h]])
# finding the transformation matrix
if inv:
#if inverted interchange pts2 and pts1
matrix = cv2.getPerspectiveTransform(pts2,pts1)
else:
matrix = cv2.getPerspectiveTransform(pts1,pts2)
imgWarp = cv2.warpPerspective(img, matrix, (w,h))
return imgWarp
# trackbar change will call nothing()
def nothing(a):
pass
# Creating the trackbars to find the optimal warping points.
# Care should be taken to choose points which are not very far from our current position
# ie. mostly lying in the bottom half region of the image since we should only confidently
# predict the lane warp present on the road at this point of time.
# create trackbars
def initializeTrackbars(initialTrackbarVals, wT=480, hT=240):
# wT and hT are the target window dimensions ie. window with video
# create trackbar window
cv2.namedWindow("Trackbars")
cv2.resizeWindow("Trackbars", 360, 240)
cv2.createTrackbar("Width Top", "Trackbars", initialTrackbarVals[0], wT//2, nothing)
cv2.createTrackbar("Height Top", "Trackbars", initialTrackbarVals[1], hT, nothing)
cv2.createTrackbar("Width Bottom", "Trackbars", initialTrackbarVals[2], wT//2, nothing)
cv2.createTrackbar("Height Bottom", "Trackbars", initialTrackbarVals[3], hT, nothing)
# find the value of trackbars (real-time)
def valTrackbars(wT=480, hT=240):
widthTop = cv2.getTrackbarPos("Width Top", "Trackbars")
heightTop = cv2.getTrackbarPos("Height Top", "Trackbars")
widthBottom = cv2.getTrackbarPos("Width Bottom", "Trackbars")
heightBottom = cv2.getTrackbarPos("Height Bottom", "Trackbars")
# return the bounding coordinates
points = np.float32([(widthTop, heightTop), (wT-widthTop, heightTop), (widthBottom, heightBottom), (wT-widthBottom, heightBottom)])
return points
# draw the warp points as red circles
def drawPoints(img, points):
for x in range(0, 4):
cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0,0,255), cv2.FILLED)
return img
# HISTOGRAM IMPLEMENTATION (TO FIND CURVE TURNING LEFT/RIGHT)
def getHistogram(img, minPer=0.1, display= False, region=1):
# simply sum all the pixels in the y direction
if region == 1:
# find histvalues for the complete region
histValues = np.sum(img, axis=0)
else:
# find histvalues for ONLY the bottom (1/n)th region where n is region value
histValues = np.sum(img[img.shape[0]//region:,:], axis=0)
#print(histValues)
# Some of the pixels in our image might just be noise. So we don’t want to use them in our
# calculation. Therefore we will set a threshold value which will be the minimum value required
# for any column to qualify as part of the path and not noise. We can set a hard-coded value but
# it is better to get it based on the live data. So we will find the maximum sum value and
# multiply our user defined percentage to it to create our threshold value.
maxValue = np.max(histValues)
minValue = minPer*maxValue
# To get the value of the curvature we will find the indices of all the columns that have value
# more than our threshold and then we will average our indices.
indexArray = np.where(histValues >= minValue)
basePoint = int(np.average(indexArray))
#print(basePoint)
if display:
imgHist = np.zeros((img.shape[0],img.shape[1],3),np.uint8)
for x,intensity in enumerate(histValues):
cv2.line(imgHist,(x,img.shape[0]),(x,img.shape[0]-intensity//255//region),(255,0,255),1)
cv2.circle(imgHist,(basePoint,img.shape[0]),20,(0,255,255),cv2.FILLED)
return basePoint,imgHist
return basePoint
# stack all the display windows
# (ONLY FOR DISPLAY PURPOSES, NO EFFECT ON PROGRAM)
def stackImages(scale,imgArray):
rows = len(imgArray)
cols = len(imgArray[0])
rowsAvailable = isinstance(imgArray[0], list)
width = imgArray[0][0].shape[1]
height = imgArray[0][0].shape[0]
if rowsAvailable:
for x in range (0, rows):
for y in range(0, cols):
if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:
imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
else:
imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)
if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)
imageBlank = np.zeros((height, width, 3), np.uint8)
hor = [imageBlank]*rows
hor_con = [imageBlank]*rows
for x in range(0, rows):
hor[x] = np.hstack(imgArray[x])
ver = np.vstack(hor)
else:
for x in range(0, rows):
if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
else:
imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)
if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
hor= np.hstack(imgArray)
ver = hor
return ver
|
flexible
|
{
"blob_id": "44175d2559f9c7d6171b6e45d24719d50dc80fb7",
"index": 7221,
"step-1": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\n<mask token>\n\n\ndef nothing(a):\n pass\n\n\n<mask token>\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\n<mask token>\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, scale)\n if len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-3": "<mask token>\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240):\n cv2.namedWindow('Trackbars')\n cv2.resizeWindow('Trackbars', 360, 240)\n cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //\n 2, nothing)\n cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],\n hT, nothing)\n cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],\n wT // 2, nothing)\n cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],\n hT, nothing)\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, scale)\n if 
len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-4": "import cv2\nimport numpy as np\n\n\ndef thresholding(img):\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n\ndef warpImg(img, points, w, h, inv=False):\n pts1 = np.float32(points)\n pts2 = np.float32([[0, 0], [w, 0], [0, h], [w, h]])\n if inv:\n matrix = cv2.getPerspectiveTransform(pts2, pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1, pts2)\n imgWarp = cv2.warpPerspective(img, matrix, (w, h))\n return imgWarp\n\n\ndef nothing(a):\n pass\n\n\ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240):\n cv2.namedWindow('Trackbars')\n cv2.resizeWindow('Trackbars', 360, 240)\n cv2.createTrackbar('Width Top', 'Trackbars', initialTrackbarVals[0], wT //\n 2, nothing)\n cv2.createTrackbar('Height Top', 'Trackbars', initialTrackbarVals[1],\n hT, nothing)\n cv2.createTrackbar('Width Bottom', 'Trackbars', initialTrackbarVals[2],\n wT // 2, nothing)\n cv2.createTrackbar('Height Bottom', 'Trackbars', initialTrackbarVals[3],\n hT, nothing)\n\n\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos('Width Top', 'Trackbars')\n heightTop = cv2.getTrackbarPos('Height Top', 'Trackbars')\n widthBottom = cv2.getTrackbarPos('Width Bottom', 'Trackbars')\n heightBottom = cv2.getTrackbarPos('Height Bottom', 'Trackbars')\n points = np.float32([(widthTop, heightTop), (wT - widthTop, heightTop),\n (widthBottom, heightBottom), (wT - widthBottom, heightBottom)])\n return points\n\n\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0, 0, \n 255), cv2.FILLED)\n return img\n\n\ndef getHistogram(img, minPer=0.1, display=False, region=1):\n if region == 1:\n histValues = np.sum(img, axis=0)\n else:\n histValues = np.sum(img[img.shape[0] // region:, :], axis=0)\n maxValue = np.max(histValues)\n minValue = minPer * maxValue\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n if display:\n imgHist = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)\n for x, intensity in enumerate(histValues):\n cv2.line(imgHist, (x, img.shape[0]), (x, img.shape[0] - \n intensity // 255 // region), (255, 0, 255), 1)\n cv2.circle(imgHist, (basePoint, img.shape[0]), 20, (0, 255, 255\n ), cv2.FILLED)\n return basePoint, imgHist\n return basePoint\n\n\ndef stackImages(scale, imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range(0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0),\n None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0\n ][0].shape[1], imgArray[0][0].shape[0]), None,\n scale, scale)\n if len(imgArray[x][y].shape) == 2:\n imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.\n COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank] * rows\n hor_con = [imageBlank] * rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale,\n scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1],\n imgArray[0].shape[0]), None, scale, 
scale)\n if len(imgArray[x].shape) == 2:\n imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor = np.hstack(imgArray)\n ver = hor\n return ver\n",
"step-5": "import cv2\nimport numpy as np\n\n# THRESHOLDING FUNCTION IMPLEMENTATION\ndef thresholding(img):\n # visualizing image in HSV parameters\n imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # the values for lowerWhite and upperWhite are found by tweaking the HSV min/max params in the \n # trackbar by running ColorPickerScript.py\n lowerWhite = np.array([80, 0, 0])\n upperWhite = np.array([255, 160, 255])\n # passing the values of lowerWhite and upperWhite to create the mask\n maskWhite = cv2.inRange(imgHSV, lowerWhite, upperWhite)\n return maskWhite\n\n# WARPING FUNCTION IMPLEMENTATION\ndef warpImg (img, points, w, h, inv=False):\n pts1 = np.float32(points)\n # defining the border coordinates of the warped image\n pts2 = np.float32([[0,0], [w,0], [0,h], [w,h]])\n # finding the transformation matrix\n if inv:\n #if inverted interchange pts2 and pts1\n matrix = cv2.getPerspectiveTransform(pts2,pts1)\n else:\n matrix = cv2.getPerspectiveTransform(pts1,pts2)\n \n imgWarp = cv2.warpPerspective(img, matrix, (w,h))\n return imgWarp\n\n# trackbar change will call nothing()\ndef nothing(a): \n pass\n\n# Creating the trackbars to find the optimal warping points.\n# Care should be taken to choose points which are not very far from our current position\n# ie. mostly lying in the bottom half region of the image since we should only confidently\n# predict the lane warp present on the road at this point of time.\n\n# create trackbars \ndef initializeTrackbars(initialTrackbarVals, wT=480, hT=240): \n # wT and hT are the target window dimensions ie. window with video\n # create trackbar window\n cv2.namedWindow(\"Trackbars\")\n cv2.resizeWindow(\"Trackbars\", 360, 240)\n cv2.createTrackbar(\"Width Top\", \"Trackbars\", initialTrackbarVals[0], wT//2, nothing)\n cv2.createTrackbar(\"Height Top\", \"Trackbars\", initialTrackbarVals[1], hT, nothing)\n cv2.createTrackbar(\"Width Bottom\", \"Trackbars\", initialTrackbarVals[2], wT//2, nothing)\n cv2.createTrackbar(\"Height Bottom\", \"Trackbars\", initialTrackbarVals[3], hT, nothing)\n\n# find the value of trackbars (real-time)\ndef valTrackbars(wT=480, hT=240):\n widthTop = cv2.getTrackbarPos(\"Width Top\", \"Trackbars\")\n heightTop = cv2.getTrackbarPos(\"Height Top\", \"Trackbars\")\n widthBottom = cv2.getTrackbarPos(\"Width Bottom\", \"Trackbars\")\n heightBottom = cv2.getTrackbarPos(\"Height Bottom\", \"Trackbars\")\n # return the bounding coordinates\n points = np.float32([(widthTop, heightTop), (wT-widthTop, heightTop), (widthBottom, heightBottom), (wT-widthBottom, heightBottom)])\n return points\n\n# draw the warp points as red circles\ndef drawPoints(img, points):\n for x in range(0, 4):\n cv2.circle(img, (int(points[x][0]), int(points[x][1])), 12, (0,0,255), cv2.FILLED)\n return img\n\n# HISTOGRAM IMPLEMENTATION (TO FIND CURVE TURNING LEFT/RIGHT)\ndef getHistogram(img, minPer=0.1, display= False, region=1): \n # simply sum all the pixels in the y direction\n if region == 1:\n # find histvalues for the complete region\n histValues = np.sum(img, axis=0)\n else:\n # find histvalues for ONLY the bottom (1/n)th region where n is region value\n histValues = np.sum(img[img.shape[0]//region:,:], axis=0)\n \n #print(histValues)\n \n # Some of the pixels in our image might just be noise. So we don’t want to use them in our \n # calculation. Therefore we will set a threshold value which will be the minimum value required\n # for any column to qualify as part of the path and not noise. 
We can set a hard-coded value but\n # it is better to get it based on the live data. So we will find the maximum sum value and \n # multiply our user defined percentage to it to create our threshold value.\n maxValue = np.max(histValues)\n minValue = minPer*maxValue\n \n # To get the value of the curvature we will find the indices of all the columns that have value \n # more than our threshold and then we will average our indices.\n indexArray = np.where(histValues >= minValue)\n basePoint = int(np.average(indexArray))\n #print(basePoint)\n \n if display:\n imgHist = np.zeros((img.shape[0],img.shape[1],3),np.uint8)\n for x,intensity in enumerate(histValues):\n cv2.line(imgHist,(x,img.shape[0]),(x,img.shape[0]-intensity//255//region),(255,0,255),1)\n cv2.circle(imgHist,(basePoint,img.shape[0]),20,(0,255,255),cv2.FILLED)\n return basePoint,imgHist\n \n return basePoint\n \n# stack all the display windows\n# (ONLY FOR DISPLAY PURPOSES, NO EFFECT ON PROGRAM) \ndef stackImages(scale,imgArray):\n rows = len(imgArray)\n cols = len(imgArray[0])\n rowsAvailable = isinstance(imgArray[0], list)\n width = imgArray[0][0].shape[1]\n height = imgArray[0][0].shape[0]\n if rowsAvailable:\n for x in range (0, rows):\n for y in range(0, cols):\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)\n else:\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)\n if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)\n imageBlank = np.zeros((height, width, 3), np.uint8)\n hor = [imageBlank]*rows\n hor_con = [imageBlank]*rows\n for x in range(0, rows):\n hor[x] = np.hstack(imgArray[x])\n ver = np.vstack(hor)\n else:\n for x in range(0, rows):\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)\n else:\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)\n if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\n hor= np.hstack(imgArray)\n ver = hor\n return ver",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
from django.urls import path,include
from .import views
urlpatterns = [
path('',views.home,name='home'),
path('category/',include('api.category.urls')),
path('product/',include('api.product.urls')),
path('user/',include('api.user.urls')),
path('order/',include('api.order.urls')),
path('payment/',include('api.payment.urls')),
]
|
normal
|
{
"blob_id": "fe12f6d3408ab115c5c440c5b45a9014cfee6539",
"index": 564,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"step-3": "from django.urls import path, include\nfrom . import views\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"step-4": "from django.urls import path,include\nfrom .import views\n\nurlpatterns = [\n path('',views.home,name='home'),\n path('category/',include('api.category.urls')),\n path('product/',include('api.product.urls')),\n path('user/',include('api.user.urls')),\n path('order/',include('api.order.urls')),\n path('payment/',include('api.payment.urls')),\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding:utf-8 -*-
# Random forest hyperparameter tuning
# RandomizedSearchCV: randomized search for good parameters
# GridSearchCV: exhaustive grid search for the best parameters
import pandas as pd
features = pd.read_csv('data/temps_extended.csv')
features = pd.get_dummies(features)
labels = features['actual']
features = features.drop('actual', axis = 1)
feature_list = list(features.columns)
import numpy as np
features = np.array(features)
labels = np.array(labels)
from sklearn.model_selection import train_test_split
train_features, test_features, train_labels, test_labels = train_test_split(features, labels,
test_size = 0.25, random_state = 42)
print('Training Features Shape:', train_features.shape)
print('Training Labels Shape:', train_labels.shape)
print('Testing Features Shape:', test_features.shape)
print('Testing Labels Shape:', test_labels.shape)
################# Select the 6 most important features and rebuild the training set ##############################
important_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year']
important_indices = [feature_list.index(feature) for feature in important_feature_names]
important_train_features = train_features[:, important_indices]
important_test_features = test_features[:, important_indices]
print('Important train features shape:', important_train_features.shape)
print('Important test features shape:', important_test_features.shape)
train_features = important_train_features[:]
test_features = important_test_features[:]
feature_list = important_feature_names[:]
################# Select the 6 most important features and rebuild the training set ##############################
######## Build the random forest model ###################
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(random_state = 42)
from pprint import pprint
# Print all parameters
pprint(rf.get_params())
# {'bootstrap': True,        # whether samples are drawn with replacement
#  'criterion': 'mse',       # loss criterion; mse measures regression error
#  'max_depth': None,        # maximum tree depth (important)
#  'max_features': 'auto',
#  'max_leaf_nodes': None,   # maximum number of leaf nodes (important)
#  'min_impurity_decrease': 0.0,
#  'min_impurity_split': None,
#  'min_samples_leaf': 1,    # minimum samples per leaf (important)
#  'min_samples_split': 2,   # minimum samples required to split a node (important)
#  'min_weight_fraction_leaf': 0.0,
#  'n_estimators': 'warn',
#  'n_jobs': None,           # number of CPU cores to use
#  'oob_score': False,
#  'random_state': 42,
#  'verbose': 0,
#  'warm_start': False}
from sklearn.model_selection import RandomizedSearchCV  # randomized search
# number of trees to build
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# how the maximum number of features is chosen
max_features = ['auto', 'sqrt']
# maximum tree depth: 10, 20, None
max_depth = [int(x) for x in np.linspace(10, 20, num = 2)]
max_depth.append(None)
# minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# minimum samples per leaf; no split may leave a child node with fewer samples
min_samples_leaf = [1, 2, 4]
# sampling strategy (with or without bootstrap)
bootstrap = [True, False]
# Random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
rf = RandomForestRegressor()  # create the model
# Randomized parameter search; cv: cross-validation folds, n_iter: 100 random draws,
# scoring: evaluation metric, verbose: logging level, n_jobs: -1 uses all CPU cores
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,
n_iter = 100, scoring='neg_mean_absolute_error',
cv = 3, verbose=2, random_state=42, n_jobs=-1)
# Run the search
# rf_random.fit(train_features, train_labels)
# print(rf_random.best_params_)
best_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True}
def evaluate(model, test_features, test_labels): # evaluation helper
predictions = model.predict(test_features)
errors = abs(predictions - test_labels)
mape = 100 * np.mean(errors / test_labels)
accuracy = 100 - mape
    print('Mean temperature error:', np.mean(errors))
print('Accuracy = {:0.2f}%.'.format(accuracy))
################# Using the default parameters ##########################
# Mean temperature error: 3.91697080292
# Accuracy = 93.36%.
base_model = RandomForestRegressor( random_state = 42) # default parameters
base_model.fit(train_features, train_labels)
print('Default parameters')
evaluate(base_model, test_features, test_labels)
################# Using the default parameters ##########################
################# Using the best random-search parameters ##########################
# Mean temperature error: 3.7141472957
# Accuracy = 93.73%.
best_random = RandomForestRegressor(n_estimators=1800,min_samples_split=10,random_state = 42,min_samples_leaf=4,max_features='auto',max_depth=None,bootstrap=True)
best_random.fit(train_features, train_labels)
print('Best from random search')
evaluate(best_random, test_features, test_labels)
################# Using the best random-search parameters ##########################
################ Fine-tune around the best random-search parameters ######################
# Mean temperature error: 3.69222090145
# Accuracy = 93.77%.
from sklearn.model_selection import GridSearchCV  # exhaustive grid search
param_grid = {'n_estimators': [1000, 1200, 1400, 1600],
'min_samples_split': [3, 5, 7],
'min_samples_leaf': [2,3, 4, 5,6],
'max_features': ['auto'],
'max_depth': [None],
'bootstrap': [True]}
rf = RandomForestRegressor()
# grid search
grid_search = GridSearchCV(estimator = rf, param_grid = param_grid,
scoring = 'neg_mean_absolute_error', cv = 3,
n_jobs = -1, verbose = 2)
grid_search.fit(train_features, train_labels)
best_grid = grid_search.best_estimator_
evaluate(best_grid, test_features, test_labels)
################ Fine-tune around the best random-search parameters ######################
######## Build the random forest model ###################
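# A brief post-search inspection sketch (assumed usage, not part of the original
# script): a fitted GridSearchCV also exposes the selected settings and the mean
# cross-validation score directly, which helps when comparing the two searches.
print('Best grid-search parameters:', grid_search.best_params_)
print('Best CV score (neg. MAE):', grid_search.best_score_)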
|
normal
|
{
"blob_id": "de4e14a4fa8520c1aae60805084224337dd9620c",
"index": 9009,
"step-1": "<mask token>\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\n<mask token>\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\n<mask token>\npprint(rf.get_params())\n<mask token>\nmax_depth.append(None)\n<mask token>\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n<mask token>\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\n<mask token>\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n<mask token>\ngrid_search.fit(train_features, train_labels)\n<mask token>\nevaluate(best_grid, test_features, test_labels)\n",
"step-3": "<mask token>\nfeatures = pd.read_csv('data/temps_extended.csv')\nfeatures = pd.get_dummies(features)\nlabels = features['actual']\nfeatures = features.drop('actual', axis=1)\nfeature_list = list(features.columns)\n<mask token>\nfeatures = np.array(features)\nlabels = np.array(labels)\n<mask token>\ntrain_features, test_features, train_labels, test_labels = train_test_split(\n features, labels, test_size=0.25, random_state=42)\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend',\n 'year']\nimportant_indices = [feature_list.index(feature) for feature in\n important_feature_names]\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\nfeature_list = important_feature_names[:]\n<mask token>\nrf = RandomForestRegressor(random_state=42)\n<mask token>\npprint(rf.get_params())\n<mask token>\nn_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\nmax_features = ['auto', 'sqrt']\nmax_depth = [int(x) for x in np.linspace(10, 20, num=2)]\nmax_depth.append(None)\nmin_samples_split = [2, 5, 10]\nmin_samples_leaf = [1, 2, 4]\nbootstrap = [True, False]\nrandom_grid = {'n_estimators': n_estimators, 'max_features': max_features,\n 'max_depth': max_depth, 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}\nrf = RandomForestRegressor()\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=\n random_grid, n_iter=100, scoring='neg_mean_absolute_error', cv=3,\n verbose=2, random_state=42, n_jobs=-1)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10,\n 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None,\n 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\nbase_model = RandomForestRegressor(random_state=42)\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\nbest_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10,\n random_state=42, min_samples_leaf=4, max_features='auto', max_depth=\n None, bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n<mask token>\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split':\n [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto'\n ], 'max_depth': [None], 'bootstrap': [True]}\nrf = RandomForestRegressor()\ngrid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring=\n 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2)\ngrid_search.fit(train_features, train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n",
"step-4": "import pandas as pd\nfeatures = pd.read_csv('data/temps_extended.csv')\nfeatures = pd.get_dummies(features)\nlabels = features['actual']\nfeatures = features.drop('actual', axis=1)\nfeature_list = list(features.columns)\nimport numpy as np\nfeatures = np.array(features)\nlabels = np.array(labels)\nfrom sklearn.model_selection import train_test_split\ntrain_features, test_features, train_labels, test_labels = train_test_split(\n features, labels, test_size=0.25, random_state=42)\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend',\n 'year']\nimportant_indices = [feature_list.index(feature) for feature in\n important_feature_names]\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\nfeature_list = important_feature_names[:]\nfrom sklearn.ensemble import RandomForestRegressor\nrf = RandomForestRegressor(random_state=42)\nfrom pprint import pprint\npprint(rf.get_params())\nfrom sklearn.model_selection import RandomizedSearchCV\nn_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]\nmax_features = ['auto', 'sqrt']\nmax_depth = [int(x) for x in np.linspace(10, 20, num=2)]\nmax_depth.append(None)\nmin_samples_split = [2, 5, 10]\nmin_samples_leaf = [1, 2, 4]\nbootstrap = [True, False]\nrandom_grid = {'n_estimators': n_estimators, 'max_features': max_features,\n 'max_depth': max_depth, 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf, 'bootstrap': bootstrap}\nrf = RandomForestRegressor()\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=\n random_grid, n_iter=100, scoring='neg_mean_absolute_error', cv=3,\n verbose=2, random_state=42, n_jobs=-1)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10,\n 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None,\n 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels):\n predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n print('平均气温误差.', np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\nbase_model = RandomForestRegressor(random_state=42)\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\nbest_random = RandomForestRegressor(n_estimators=1800, min_samples_split=10,\n random_state=42, min_samples_leaf=4, max_features='auto', max_depth=\n None, bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\nfrom sklearn.model_selection import GridSearchCV\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600], 'min_samples_split':\n [3, 5, 7], 'min_samples_leaf': [2, 3, 4, 5, 6], 'max_features': ['auto'\n ], 'max_depth': [None], 'bootstrap': [True]}\nrf = RandomForestRegressor()\ngrid_search = GridSearchCV(estimator=rf, param_grid=param_grid, scoring=\n 'neg_mean_absolute_error', cv=3, n_jobs=-1, verbose=2)\ngrid_search.fit(train_features, 
train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n",
"step-5": "# -*- coding:utf-8 -*-\n\n#随机森林调参\n#RandomizedSearchCV 随机最佳\n#GridSearchCV 地毯式最佳\n\n\nimport pandas as pd\nfeatures = pd.read_csv('data/temps_extended.csv')\n\n\nfeatures = pd.get_dummies(features)\n\nlabels = features['actual']\nfeatures = features.drop('actual', axis = 1)\n\nfeature_list = list(features.columns)\n\nimport numpy as np\n\nfeatures = np.array(features)\nlabels = np.array(labels)\n\nfrom sklearn.model_selection import train_test_split\n\ntrain_features, test_features, train_labels, test_labels = train_test_split(features, labels,\n test_size = 0.25, random_state = 42)\n\nprint('Training Features Shape:', train_features.shape)\nprint('Training Labels Shape:', train_labels.shape)\nprint('Testing Features Shape:', test_features.shape)\nprint('Testing Labels Shape:', test_labels.shape)\n\n#################选择6个比较重要的参数当做训练集,重新创建训练集##############################\nimportant_feature_names = ['temp_1', 'average', 'ws_1', 'temp_2', 'friend', 'year']\n\nimportant_indices = [feature_list.index(feature) for feature in important_feature_names]\n\nimportant_train_features = train_features[:, important_indices]\nimportant_test_features = test_features[:, important_indices]\n\nprint('Important train features shape:', important_train_features.shape)\nprint('Important test features shape:', important_test_features.shape)\n\ntrain_features = important_train_features[:]\ntest_features = important_test_features[:]\n\nfeature_list = important_feature_names[:]\n\n#################选择6个比较重要的参数当做训练集,重新创建训练集##############################\n\n########创建随机森林模型###################\nfrom sklearn.ensemble import RandomForestRegressor\n\nrf = RandomForestRegressor(random_state = 42)\n\nfrom pprint import pprint\n\n# 打印所有参数\npprint(rf.get_params())\n\n# {'bootstrap': True,#是否随机采样\n# 'criterion': 'mse',#指定目标方程 损失的计算方法 熵值 回归 mse计算误差\n# 'max_depth': None,# 树的最大深度 重要\n# 'max_features': 'auto',\n# 'max_leaf_nodes': None, 最大叶子节点 重要\n# 'min_impurity_decrease': 0.0,\n# 'min_impurity_split': None,\n# 'min_samples_leaf': 1, 信息增益 重要\n# 'min_samples_split': 2, 最小分裂次数 重要\n# 'min_weight_fraction_leaf': 0.0,\n# 'n_estimators': 'warn',\n# 'n_jobs': None, #多少核CPU 去跑\n# 'oob_score': False,\n# 'random_state': 42,\n# 'verbose': 0,\n# 'warm_start': False}\n\nfrom sklearn.model_selection import RandomizedSearchCV# 随机最好\n# 建立树的个数\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# 最大特征的选择方式\nmax_features = ['auto', 'sqrt']\n# 树的最大深度 10 20 none\nmax_depth = [int(x) for x in np.linspace(10, 20, num = 2)]\nmax_depth.append(None)\n# 节点最小分裂所需样本个数\nmin_samples_split = [2, 5, 10]\n# 叶子节点最小样本数,任何分裂不能让其子节点样本数少于此值\nmin_samples_leaf = [1, 2, 4]\n# 样本采样方法\nbootstrap = [True, False]\n\n# Random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\n\n\nrf = RandomForestRegressor()# 创建模型\n#随机寻找参数 cv:交叉验证 , n_iter 随机100次,scoring:评估方法,verbose:打印信息,n_jobs:所以cpu去跑\nrf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid,\n n_iter = 100, scoring='neg_mean_absolute_error',\n cv = 3, verbose=2, random_state=42, n_jobs=-1)\n\n\n\n\n# 执行寻找操作\n# rf_random.fit(train_features, train_labels)\n# print(rf_random.best_params_)\nbest_params = {'n_estimators': 1800, 'min_samples_split': 10, 'min_samples_leaf': 4, 'max_features': 'auto', 'max_depth': None, 'bootstrap': True}\n\n\ndef evaluate(model, test_features, test_labels): #评估\n 
predictions = model.predict(test_features)\n errors = abs(predictions - test_labels)\n mape = 100 * np.mean(errors / test_labels)\n accuracy = 100 - mape\n\n print('平均气温误差.',np.mean(errors))\n print('Accuracy = {:0.2f}%.'.format(accuracy))\n\n\n\n#################使用默认参数##########################\n# 平均气温误差. 3.91697080292\n# Accuracy = 93.36%.\nbase_model = RandomForestRegressor( random_state = 42) #使用默认的参数\nbase_model.fit(train_features, train_labels)\nprint('默认参数')\nevaluate(base_model, test_features, test_labels)\n#################使用默认参数##########################\n\n\n#################使用最好参数##########################\n# 平均气温误差. 3.7141472957\n# Accuracy = 93.73%.\nbest_random = RandomForestRegressor(n_estimators=1800,min_samples_split=10,random_state = 42,min_samples_leaf=4,max_features='auto',max_depth=None,bootstrap=True)\nbest_random.fit(train_features, train_labels)\nprint('局部最好')\nevaluate(best_random, test_features, test_labels)\n#################使用最好参数##########################\n\n################在随机最好的参数进行微调######################\n# 平均气温误差. 3.69222090145\n# Accuracy = 93.77%.\nfrom sklearn.model_selection import GridSearchCV# 地毯式搜索\n\nparam_grid = {'n_estimators': [1000, 1200, 1400, 1600],\n 'min_samples_split': [3, 5, 7],\n 'min_samples_leaf': [2,3, 4, 5,6],\n 'max_features': ['auto'],\n 'max_depth': [None],\n 'bootstrap': [True]}\n\n\n\nrf = RandomForestRegressor()\n\n# 网络搜索\ngrid_search = GridSearchCV(estimator = rf, param_grid = param_grid,\n scoring = 'neg_mean_absolute_error', cv = 3,\n n_jobs = -1, verbose = 2)\ngrid_search.fit(train_features, train_labels)\nbest_grid = grid_search.best_estimator_\nevaluate(best_grid, test_features, test_labels)\n################在随机最好的参数进行微调######################\n\n\n########创建随机森林模型###################",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def numIdenticalPairs(self, nums: List[int]) ->int:
count = 0
inputLength = len(nums)
for i in range(0, inputLength):
for j in range(i, inputLength - 1):
if nums[i] == nums[j + 1]:
count += 1
return count
<|reserved_special_token_1|>
class Solution:
def numIdenticalPairs(self, nums: List[int]) -> int:
count = 0
inputLength = len(nums)
for i in range (0, inputLength):
for j in range (i, inputLength - 1):
if (nums[i] == nums[j + 1]): count += 1
return count
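# An alternative O(n) sketch for reference (assumed helper, not part of the original
# Solution class): every value that occurs c times contributes c*(c-1)//2 identical
# pairs, so counting frequencies avoids the nested loops used above.
from collections import Counter

def num_identical_pairs_linear(nums):
    counts = Counter(nums)
    return sum(c * (c - 1) // 2 for c in counts.values())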
|
flexible
|
{
"blob_id": "d7570bbea1e8c7674d507f8e86ce04d22058b21b",
"index": 7595,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def numIdenticalPairs(self, nums: List[int]) ->int:\n count = 0\n inputLength = len(nums)\n for i in range(0, inputLength):\n for j in range(i, inputLength - 1):\n if nums[i] == nums[j + 1]:\n count += 1\n return count\n",
"step-4": "class Solution:\n def numIdenticalPairs(self, nums: List[int]) -> int:\n count = 0\n inputLength = len(nums)\n for i in range (0, inputLength):\n for j in range (i, inputLength - 1):\n if (nums[i] == nums[j + 1]): count += 1\n return count",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class PhysicianRobot(Robot):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Robot:
def __init__(self, name):
self.name = name
def say_hi(self):
print("Hi, I'm from class Robot")
print('Hi, Ich bin ' + self.name)
def say_hi_to_everybody(self):
print('Hi to all objects :-)')
class PhysicianRobot(Robot):
def say_hi_again(self):
print("Hi, I'm from sub-class PhysicianRobot")
print('Hi, Ich bin ' + self.name)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Robot:
def __init__(self, name):
self.name = name
def say_hi(self):
print("Hi, I'm from class Robot")
print('Hi, Ich bin ' + self.name)
def say_hi_to_everybody(self):
print('Hi to all objects :-)')
class PhysicianRobot(Robot):
def say_hi_again(self):
print("Hi, I'm from sub-class PhysicianRobot")
print('Hi, Ich bin ' + self.name)
<|reserved_special_token_0|>
print(x, type(x))
x.say_hi()
x.say_hi_to_everybody()
print(y, type(y))
y.say_hi()
y.say_hi_again()
y.say_hi_to_everybody()
<|reserved_special_token_1|>
class Robot:
def __init__(self, name):
self.name = name
def say_hi(self):
print("Hi, I'm from class Robot")
print('Hi, Ich bin ' + self.name)
def say_hi_to_everybody(self):
print('Hi to all objects :-)')
class PhysicianRobot(Robot):
def say_hi_again(self):
print("Hi, I'm from sub-class PhysicianRobot")
print('Hi, Ich bin ' + self.name)
name_1 = 'Marvin'
name_2 = 'James'
x = Robot(name_1)
y = PhysicianRobot(name_2)
print(x, type(x))
x.say_hi()
x.say_hi_to_everybody()
print(y, type(y))
y.say_hi()
y.say_hi_again()
y.say_hi_to_everybody()
<|reserved_special_token_1|>
class Robot:
def __init__(self, name):
self.name = name
def say_hi(self):
print("Hi, I'm from class Robot")
print("Hi, Ich bin " + self.name)
def say_hi_to_everybody(self):
print("Hi to all objects :-)")
class PhysicianRobot(Robot):
def say_hi_again(self):
print("Hi, I'm from sub-class PhysicianRobot")
print("Hi, Ich bin " + self.name)
name_1 = "Marvin"
name_2 = "James"
x = Robot(name_1)
y = PhysicianRobot(name_2)
print(x, type(x))
x.say_hi()
x.say_hi_to_everybody()
print(y, type(y))
y.say_hi()
y.say_hi_again()
y.say_hi_to_everybody()
|
flexible
|
{
"blob_id": "6b24c438ca7bb4c37ae356c18c562831767f0569",
"index": 9961,
"step-1": "<mask token>\n\n\nclass PhysicianRobot(Robot):\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\n<mask token>\n",
"step-3": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\n<mask token>\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-4": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print('Hi, Ich bin ' + self.name)\n\n def say_hi_to_everybody(self):\n print('Hi to all objects :-)')\n\n\nclass PhysicianRobot(Robot):\n\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print('Hi, Ich bin ' + self.name)\n\n\nname_1 = 'Marvin'\nname_2 = 'James'\nx = Robot(name_1)\ny = PhysicianRobot(name_2)\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-5": "class Robot:\n\n def __init__(self, name):\n self.name = name\n\n def say_hi(self):\n print(\"Hi, I'm from class Robot\")\n print(\"Hi, Ich bin \" + self.name)\n\n def say_hi_to_everybody(self):\n print(\"Hi to all objects :-)\")\n\n\nclass PhysicianRobot(Robot):\n def say_hi_again(self):\n print(\"Hi, I'm from sub-class PhysicianRobot\")\n print(\"Hi, Ich bin \" + self.name)\n\n\nname_1 = \"Marvin\"\nname_2 = \"James\"\n\nx = Robot(name_1)\ny = PhysicianRobot(name_2)\n\nprint(x, type(x))\nx.say_hi()\nx.say_hi_to_everybody()\n\nprint(y, type(y))\ny.say_hi()\ny.say_hi_again()\ny.say_hi_to_everybody()\n",
"step-ids": [
1,
6,
7,
8,
9
]
}
|
[
1,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def t_tm_tM__a__g(db, anno):
cmd = (
"\nSELECT data, t, tmin, tmax\nFROM Giornaliero\nWHERE strftime('%Y') = '{}'\n "
.format(anno))
dati = db.cur.execute(cmd).fetchall()
ldate = []
lt = []
ltm = []
ltM = []
for data, t, tm, tM in dati:
ldate.append(data)
lt.append(t)
ltm.append(tm)
ltM.append(tM)
return ldate, lt, ltm, ltM
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def t_tm_tM__a__g(db, anno):
cmd = (
"\nSELECT data, t, tmin, tmax\nFROM Giornaliero\nWHERE strftime('%Y') = '{}'\n "
.format(anno))
dati = db.cur.execute(cmd).fetchall()
ldate = []
lt = []
ltm = []
ltM = []
for data, t, tm, tM in dati:
ldate.append(data)
lt.append(t)
ltm.append(tm)
ltM.append(tM)
return ldate, lt, ltm, ltM
if __name__ == '__main__':
db = DB.DB()
db.crea_db()
t_tm_tM__a__g(db, 2017)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import db_02 as DB
def t_tm_tM__a__g(db, anno):
cmd = (
"\nSELECT data, t, tmin, tmax\nFROM Giornaliero\nWHERE strftime('%Y') = '{}'\n "
.format(anno))
dati = db.cur.execute(cmd).fetchall()
ldate = []
lt = []
ltm = []
ltM = []
for data, t, tm, tM in dati:
ldate.append(data)
lt.append(t)
ltm.append(tm)
ltM.append(tM)
return ldate, lt, ltm, ltM
if __name__ == '__main__':
db = DB.DB()
db.crea_db()
t_tm_tM__a__g(db, 2017)
<|reserved_special_token_1|>
# 12.02.17
"""
Naming convention
a__b__c
a: parameter
    t  - temperature
    tm - minimum temperature
    tM - maximum temperature
b: time span
    a - year
c: source data table
    g - daily (Giornaliero)
"""
import db_02 as DB
def t_tm_tM__a__g(db, anno):
cmd = """
SELECT data, t, tmin, tmax
FROM Giornaliero
WHERE strftime('%Y', data) = '{}'
""".format(anno)
dati = db.cur.execute(cmd).fetchall()
ldate = []
lt = []
ltm = []
ltM = []
for data, t, tm , tM in dati:
ldate.append(data)
lt.append(t)
ltm.append(tm)
ltM.append(tM)
return ldate, lt, ltm, ltM
if __name__ == '__main__':
db = DB.DB()
db.crea_db()
t_tm_tM__a__g(db, 2017)
|
flexible
|
{
"blob_id": "26b0a762b8eb30f0ef3c5a914f032c2a7d24f750",
"index": 5606,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = (\n \"\\nSELECT data, t, tmin, tmax\\nFROM Giornaliero\\nWHERE strftime('%Y') = '{}'\\n \"\n .format(anno))\n dati = db.cur.execute(cmd).fetchall()\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm, tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n return ldate, lt, ltm, ltM\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = (\n \"\\nSELECT data, t, tmin, tmax\\nFROM Giornaliero\\nWHERE strftime('%Y') = '{}'\\n \"\n .format(anno))\n dati = db.cur.execute(cmd).fetchall()\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm, tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n return ldate, lt, ltm, ltM\n\n\nif __name__ == '__main__':\n db = DB.DB()\n db.crea_db()\n t_tm_tM__a__g(db, 2017)\n",
"step-4": "<mask token>\nimport db_02 as DB\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = (\n \"\\nSELECT data, t, tmin, tmax\\nFROM Giornaliero\\nWHERE strftime('%Y') = '{}'\\n \"\n .format(anno))\n dati = db.cur.execute(cmd).fetchall()\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm, tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n return ldate, lt, ltm, ltM\n\n\nif __name__ == '__main__':\n db = DB.DB()\n db.crea_db()\n t_tm_tM__a__g(db, 2017)\n",
"step-5": "# 12.02.17\n\n\"\"\"\nnomencalura\na__b__c\n\na: parametro\n t-temperatura\n tm-temperatura minima\n tM-teperatura massima\n\nb: intervallo di tempo\n a-anno\n\nc: tabella fonte dati\n g-giornaliero\n\"\"\"\n\nimport db_02 as DB\n\n\ndef t_tm_tM__a__g(db, anno):\n cmd = \"\"\"\nSELECT data, t, tmin, tmax\nFROM Giornaliero\nWHERE strftime('%Y') = '{}'\n \"\"\".format(anno)\n\n dati = db.cur.execute(cmd).fetchall()\n\n ldate = []\n lt = []\n ltm = []\n ltM = []\n for data, t, tm , tM in dati:\n ldate.append(data)\n lt.append(t)\n ltm.append(tm)\n ltM.append(tM)\n\n return ldate, lt, ltm, ltM\n\n\nif __name__ == '__main__':\n db = DB.DB()\n db.crea_db()\n\n t_tm_tM__a__g(db, 2017)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def normalize(f, lammatize=False):
f = [x.lower() for x in f]
f = [x.replace('\\n', ' ') for x in f]
f = [x.replace('\\t', ' ') for x in f]
f = [x.replace('\\xa0', ' ') for x in f]
f = [x.replace('\\xc2', ' ') for x in f]
f = [x.replace(' u ', ' you ') for x in f]
f = [x.replace(' em ', ' them ') for x in f]
f = [x.replace(' da ', ' the ') for x in f]
f = [x.replace(' yo ', ' you ') for x in f]
f = [x.replace(' ur ', ' you ') for x in f]
f = [x.replace("won't", 'will not') for x in f]
f = [x.replace("can't", 'cannot') for x in f]
f = [x.replace("i'm", 'i am') for x in f]
f = [x.replace(' im ', ' i am ') for x in f]
f = [x.replace("ain't", 'is not') for x in f]
f = [x.replace("'ll", ' will') for x in f]
f = [x.replace("'t", ' not') for x in f]
f = [x.replace("'ve", ' have') for x in f]
f = [x.replace("'s", ' is') for x in f]
f = [x.replace("'re", ' are') for x in f]
f = [x.replace("'d", ' would') for x in f]
bwMap = loadBW()
for key, value in bwMap.items():
kpad = ' ' + key + ' '
vpad = ' ' + value + ' '
f = [x.replace(kpad, vpad) for x in f]
"""
f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f]
#f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f]
f = [re.subn("s( |$)", " ", x)[0].strip() for x in f]
f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f]
f = [x.replace("tard ", " ") for x in f]
f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f]
f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f]
f = [re.subn("<\\S*>","", x)[0].strip() for x in f]
"""
tokenized_sents = [word_tokenize(i) for i in f]
if not lammatize:
stemmer = PorterStemmer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])
else:
lammatizer = WordNetLemmatizer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents
[i][j])
for i in range(0, len(tokenized_sents)):
f[i] = ' '.join(tokenized_sents[i])
return f
def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features
=500, binary=False, do_normalization=False, stopwords=False, verbose=
True, analyzer_char=False):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
analyzer_type = 'word'
if analyzer_char:
analyzer_type = 'char'
if binary:
vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),
binary=True)
elif stopwords:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
stop_words='english', analyzer=analyzer_type, sublinear_tf=True)
else:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
sublinear_tf=True, analyzer=analyzer_type)
if verbose:
print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,
min_ngrams))
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = no_of_features
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,
no_of_features=500, do_normalization=False, verbose=True):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
skipper = functools.partial(skipgrams, n=2, k=3)
vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = nm
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def specialCases(data, labels, ntrain, verbose=True):
g = [x.lower().replace('you are', ' SSS ').replace("you're", ' SSS ').
replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]
f = []
for x in g:
fts = ' '
x = normalize(x)
for y in x:
w = y.strip().replace('?', '.').split('.')
fts = fts + ' ' + w[0]
f.append(fts)
X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,
do_normalization=True, verbose=verbose)
return X_trn, y_trn, X_tst
def loadBW():
f = open(BADWORDS_FILE, 'r')
bwMap = dict()
for line in f:
sp = line.strip().lower().split(',')
if len(sp) == 2:
bwMap[sp[0].strip()] = sp[1].strip()
return bwMap
<|reserved_special_token_0|>
def write_submission(x, filename):
wtr = open(filename, 'w')
for i in range(len(x)):
wtr.write(format(x[i], '0.10f'))
wtr.write('\n')
wtr.close()
def run(verbose=True):
t0 = time()
train_data = readCsv(TRAIN_FILE)
train2_data = readCsv(TEST_SOL_FILE)
train_data = train_data + train2_data
labels = array([int(x[0]) for x in train_data])
train = [x[2] for x in train_data]
test_data = readCsv(TEST_FILE)
test_data = [x[2] for x in test_data]
data = train + test_data
n = len(data)
ntrain = len(train)
X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose
=verbose)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def normalize(f, lammatize=False):
f = [x.lower() for x in f]
f = [x.replace('\\n', ' ') for x in f]
f = [x.replace('\\t', ' ') for x in f]
f = [x.replace('\\xa0', ' ') for x in f]
f = [x.replace('\\xc2', ' ') for x in f]
f = [x.replace(' u ', ' you ') for x in f]
f = [x.replace(' em ', ' them ') for x in f]
f = [x.replace(' da ', ' the ') for x in f]
f = [x.replace(' yo ', ' you ') for x in f]
f = [x.replace(' ur ', ' you ') for x in f]
f = [x.replace("won't", 'will not') for x in f]
f = [x.replace("can't", 'cannot') for x in f]
f = [x.replace("i'm", 'i am') for x in f]
f = [x.replace(' im ', ' i am ') for x in f]
f = [x.replace("ain't", 'is not') for x in f]
f = [x.replace("'ll", ' will') for x in f]
f = [x.replace("'t", ' not') for x in f]
f = [x.replace("'ve", ' have') for x in f]
f = [x.replace("'s", ' is') for x in f]
f = [x.replace("'re", ' are') for x in f]
f = [x.replace("'d", ' would') for x in f]
bwMap = loadBW()
for key, value in bwMap.items():
kpad = ' ' + key + ' '
vpad = ' ' + value + ' '
f = [x.replace(kpad, vpad) for x in f]
"""
f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f]
#f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f]
f = [re.subn("s( |$)", " ", x)[0].strip() for x in f]
f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f]
f = [x.replace("tard ", " ") for x in f]
f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f]
f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f]
f = [re.subn("<\\S*>","", x)[0].strip() for x in f]
"""
tokenized_sents = [word_tokenize(i) for i in f]
if not lammatize:
stemmer = PorterStemmer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])
else:
lammatizer = WordNetLemmatizer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents
[i][j])
for i in range(0, len(tokenized_sents)):
f[i] = ' '.join(tokenized_sents[i])
return f
def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features
=500, binary=False, do_normalization=False, stopwords=False, verbose=
True, analyzer_char=False):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
analyzer_type = 'word'
if analyzer_char:
analyzer_type = 'char'
if binary:
vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),
binary=True)
elif stopwords:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
stop_words='english', analyzer=analyzer_type, sublinear_tf=True)
else:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
sublinear_tf=True, analyzer=analyzer_type)
if verbose:
print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,
min_ngrams))
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = no_of_features
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,
no_of_features=500, do_normalization=False, verbose=True):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
skipper = functools.partial(skipgrams, n=2, k=3)
vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = nm
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def specialCases(data, labels, ntrain, verbose=True):
g = [x.lower().replace('you are', ' SSS ').replace("you're", ' SSS ').
replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]
f = []
for x in g:
fts = ' '
x = normalize(x)
for y in x:
w = y.strip().replace('?', '.').split('.')
fts = fts + ' ' + w[0]
f.append(fts)
X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,
do_normalization=True, verbose=verbose)
return X_trn, y_trn, X_tst
def loadBW():
f = open(BADWORDS_FILE, 'r')
bwMap = dict()
for line in f:
sp = line.strip().lower().split(',')
if len(sp) == 2:
bwMap[sp[0].strip()] = sp[1].strip()
return bwMap
def readCsv(fname, skipFirst=True, delimiter=','):
reader = csv.reader(open(fname), delimiter=delimiter)
rows = []
count = 1
for row in reader:
if not skipFirst or count > 1:
rows.append(row)
count += 1
return rows
def write_submission(x, filename):
wtr = open(filename, 'w')
for i in range(len(x)):
wtr.write(format(x[i], '0.10f'))
wtr.write('\n')
wtr.close()
def run(verbose=True):
t0 = time()
train_data = readCsv(TRAIN_FILE)
train2_data = readCsv(TEST_SOL_FILE)
train_data = train_data + train2_data
labels = array([int(x[0]) for x in train_data])
train = [x[2] for x in train_data]
test_data = readCsv(TEST_FILE)
test_data = [x[2] for x in test_data]
data = train + test_data
n = len(data)
ntrain = len(train)
X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose
=verbose)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def normalize(f, lammatize=False):
f = [x.lower() for x in f]
f = [x.replace('\\n', ' ') for x in f]
f = [x.replace('\\t', ' ') for x in f]
f = [x.replace('\\xa0', ' ') for x in f]
f = [x.replace('\\xc2', ' ') for x in f]
f = [x.replace(' u ', ' you ') for x in f]
f = [x.replace(' em ', ' them ') for x in f]
f = [x.replace(' da ', ' the ') for x in f]
f = [x.replace(' yo ', ' you ') for x in f]
f = [x.replace(' ur ', ' you ') for x in f]
f = [x.replace("won't", 'will not') for x in f]
f = [x.replace("can't", 'cannot') for x in f]
f = [x.replace("i'm", 'i am') for x in f]
f = [x.replace(' im ', ' i am ') for x in f]
f = [x.replace("ain't", 'is not') for x in f]
f = [x.replace("'ll", ' will') for x in f]
f = [x.replace("'t", ' not') for x in f]
f = [x.replace("'ve", ' have') for x in f]
f = [x.replace("'s", ' is') for x in f]
f = [x.replace("'re", ' are') for x in f]
f = [x.replace("'d", ' would') for x in f]
bwMap = loadBW()
for key, value in bwMap.items():
kpad = ' ' + key + ' '
vpad = ' ' + value + ' '
f = [x.replace(kpad, vpad) for x in f]
"""
f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f]
#f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f]
f = [re.subn("s( |$)", " ", x)[0].strip() for x in f]
f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f]
f = [x.replace("tard ", " ") for x in f]
f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f]
f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f]
f = [re.subn("<\\S*>","", x)[0].strip() for x in f]
"""
tokenized_sents = [word_tokenize(i) for i in f]
if not lammatize:
stemmer = PorterStemmer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])
else:
lammatizer = WordNetLemmatizer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents
[i][j])
for i in range(0, len(tokenized_sents)):
f[i] = ' '.join(tokenized_sents[i])
return f
def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features
=500, binary=False, do_normalization=False, stopwords=False, verbose=
True, analyzer_char=False):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
analyzer_type = 'word'
if analyzer_char:
analyzer_type = 'char'
if binary:
vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),
binary=True)
elif stopwords:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
stop_words='english', analyzer=analyzer_type, sublinear_tf=True)
else:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
sublinear_tf=True, analyzer=analyzer_type)
if verbose:
print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,
min_ngrams))
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = no_of_features
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,
no_of_features=500, do_normalization=False, verbose=True):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
skipper = functools.partial(skipgrams, n=2, k=3)
vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = nm
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def specialCases(data, labels, ntrain, verbose=True):
g = [x.lower().replace('you are', ' SSS ').replace("you're", ' SSS ').
replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]
f = []
for x in g:
fts = ' '
x = normalize(x)
for y in x:
w = y.strip().replace('?', '.').split('.')
fts = fts + ' ' + w[0]
f.append(fts)
X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,
do_normalization=True, verbose=verbose)
return X_trn, y_trn, X_tst
def loadBW():
f = open(BADWORDS_FILE, 'r')
bwMap = dict()
for line in f:
sp = line.strip().lower().split(',')
if len(sp) == 2:
bwMap[sp[0].strip()] = sp[1].strip()
return bwMap
def readCsv(fname, skipFirst=True, delimiter=','):
reader = csv.reader(open(fname), delimiter=delimiter)
rows = []
count = 1
for row in reader:
if not skipFirst or count > 1:
rows.append(row)
count += 1
return rows
def write_submission(x, filename):
wtr = open(filename, 'w')
for i in range(len(x)):
wtr.write(format(x[i], '0.10f'))
wtr.write('\n')
wtr.close()
def run(verbose=True):
t0 = time()
train_data = readCsv(TRAIN_FILE)
train2_data = readCsv(TEST_SOL_FILE)
train_data = train_data + train2_data
labels = array([int(x[0]) for x in train_data])
train = [x[2] for x in train_data]
test_data = readCsv(TEST_FILE)
test_data = [x[2] for x in test_data]
data = train + test_data
n = len(data)
ntrain = len(train)
X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose
=verbose)
<|reserved_special_token_0|>
run()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DIR_PATH = ''
TRAIN_FILE = DIR_PATH + 'train.csv'
TEST_SOL_FILE = DIR_PATH + 'test_with_solutions.csv'
BADWORDS_FILE = DIR_PATH + 'bad_words.txt'
TEST_FILE = DIR_PATH + 'test.csv'
PREDICTION_FILE = DIR_PATH + 'preds.csv'
def normalize(f, lammatize=False):
f = [x.lower() for x in f]
f = [x.replace('\\n', ' ') for x in f]
f = [x.replace('\\t', ' ') for x in f]
f = [x.replace('\\xa0', ' ') for x in f]
f = [x.replace('\\xc2', ' ') for x in f]
f = [x.replace(' u ', ' you ') for x in f]
f = [x.replace(' em ', ' them ') for x in f]
f = [x.replace(' da ', ' the ') for x in f]
f = [x.replace(' yo ', ' you ') for x in f]
f = [x.replace(' ur ', ' you ') for x in f]
f = [x.replace("won't", 'will not') for x in f]
f = [x.replace("can't", 'cannot') for x in f]
f = [x.replace("i'm", 'i am') for x in f]
f = [x.replace(' im ', ' i am ') for x in f]
f = [x.replace("ain't", 'is not') for x in f]
f = [x.replace("'ll", ' will') for x in f]
f = [x.replace("'t", ' not') for x in f]
f = [x.replace("'ve", ' have') for x in f]
f = [x.replace("'s", ' is') for x in f]
f = [x.replace("'re", ' are') for x in f]
f = [x.replace("'d", ' would') for x in f]
bwMap = loadBW()
for key, value in bwMap.items():
kpad = ' ' + key + ' '
vpad = ' ' + value + ' '
f = [x.replace(kpad, vpad) for x in f]
"""
f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f]
#f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f]
f = [re.subn("s( |$)", " ", x)[0].strip() for x in f]
f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f]
f = [x.replace("tard ", " ") for x in f]
f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f]
f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f]
f = [re.subn("<\\S*>","", x)[0].strip() for x in f]
"""
tokenized_sents = [word_tokenize(i) for i in f]
if not lammatize:
stemmer = PorterStemmer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])
else:
lammatizer = WordNetLemmatizer()
for i in range(0, len(tokenized_sents)):
for j in range(0, len(tokenized_sents[i])):
tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents
[i][j])
for i in range(0, len(tokenized_sents)):
f[i] = ' '.join(tokenized_sents[i])
return f
def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features
=500, binary=False, do_normalization=False, stopwords=False, verbose=
True, analyzer_char=False):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
analyzer_type = 'word'
if analyzer_char:
analyzer_type = 'char'
if binary:
vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),
binary=True)
elif stopwords:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
stop_words='english', analyzer=analyzer_type, sublinear_tf=True)
else:
vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),
sublinear_tf=True, analyzer=analyzer_type)
if verbose:
print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,
min_ngrams))
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = no_of_features
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,
no_of_features=500, do_normalization=False, verbose=True):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
skipper = functools.partial(skipgrams, n=2, k=3)
vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = nm
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print('Extracting best features by a chi-squared test.. ', X_train.
shape, X_test.shape)
return X_train, y, X_test
def specialCases(data, labels, ntrain, verbose=True):
g = [x.lower().replace('you are', ' SSS ').replace("you're", ' SSS ').
replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]
f = []
for x in g:
fts = ' '
x = normalize(x)
for y in x:
w = y.strip().replace('?', '.').split('.')
fts = fts + ' ' + w[0]
f.append(fts)
X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,
do_normalization=True, verbose=verbose)
return X_trn, y_trn, X_tst
def loadBW():
f = open(BADWORDS_FILE, 'r')
bwMap = dict()
for line in f:
sp = line.strip().lower().split(',')
if len(sp) == 2:
bwMap[sp[0].strip()] = sp[1].strip()
return bwMap
def readCsv(fname, skipFirst=True, delimiter=','):
reader = csv.reader(open(fname), delimiter=delimiter)
rows = []
count = 1
for row in reader:
if not skipFirst or count > 1:
rows.append(row)
count += 1
return rows
def write_submission(x, filename):
wtr = open(filename, 'w')
for i in range(len(x)):
wtr.write(format(x[i], '0.10f'))
wtr.write('\n')
wtr.close()
def run(verbose=True):
t0 = time()
train_data = readCsv(TRAIN_FILE)
train2_data = readCsv(TEST_SOL_FILE)
train_data = train_data + train2_data
labels = array([int(x[0]) for x in train_data])
train = [x[2] for x in train_data]
test_data = readCsv(TEST_FILE)
test_data = [x[2] for x in test_data]
data = train + test_data
n = len(data)
ntrain = len(train)
X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose
=verbose)
<|reserved_special_token_0|>
run()
<|reserved_special_token_1|>
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn import metrics, ensemble, linear_model, svm
from numpy import log, ones, array, zeros, mean, std, repeat
import numpy as np
import scipy.sparse as sp
import re
import csv
from time import time
import functools
from nltk.util import skipgrams
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
DIR_PATH = ""
TRAIN_FILE = DIR_PATH + "train.csv"
TEST_SOL_FILE = DIR_PATH + "test_with_solutions.csv" # This is also used for training, together with TRAIN_FILE
BADWORDS_FILE = DIR_PATH + "bad_words.txt" # attached with submission
TEST_FILE = DIR_PATH + "test.csv" # set this to the new test file name
PREDICTION_FILE = DIR_PATH + "preds.csv" # predictions will be written here
def normalize(f , lammatize= False):
f = [x.lower() for x in f]
f = [x.replace("\\n"," ") for x in f]
f = [x.replace("\\t"," ") for x in f]
f = [x.replace("\\xa0"," ") for x in f]
f = [x.replace("\\xc2"," ") for x in f]
#f = [x.replace(","," ").replace("."," ").replace(" ", " ") for x in f]
#f = [re.subn(" ([a-z]) ","\\1", x)[0] for x in f]
#f = [x.replace(" "," ") for x in f]
f = [x.replace(" u "," you ") for x in f]
f = [x.replace(" em "," them ") for x in f]
f = [x.replace(" da "," the ") for x in f]
f = [x.replace(" yo "," you ") for x in f]
f = [x.replace(" ur "," you ") for x in f]
#f = [x.replace(" ur "," your ") for x in f]
#f = [x.replace(" ur "," you're ") for x in f]
f = [x.replace("won't", "will not") for x in f]
f = [x.replace("can't", "cannot") for x in f]
f = [x.replace("i'm", "i am") for x in f]
f = [x.replace(" im ", " i am ") for x in f]
f = [x.replace("ain't", "is not") for x in f]
f = [x.replace("'ll", " will") for x in f]
f = [x.replace("'t", " not") for x in f]
f = [x.replace("'ve", " have") for x in f]
f = [x.replace("'s", " is") for x in f]
f = [x.replace("'re", " are") for x in f]
f = [x.replace("'d", " would") for x in f]
#f = [x.replace("outta", "out of") for x in f]
bwMap = loadBW()
for key, value in bwMap.items():
kpad = " " + key + " "
vpad = " " + value + " "
f = [x.replace(kpad, vpad) for x in f]
# stemming
"""
f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f]
#f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f]
f = [re.subn("s( |$)", " ", x)[0].strip() for x in f]
f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f]
f = [x.replace("tard ", " ") for x in f]
f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f]
f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f]
f = [re.subn("<\S*>","", x)[0].strip() for x in f]
"""
tokenized_sents = [word_tokenize(i) for i in f]
if not lammatize:
stemmer = PorterStemmer()
for i in range (0, len(tokenized_sents)):
for j in range (0,len(tokenized_sents[i])):
tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])
else:
lammatizer = WordNetLemmatizer()
for i in range (0, len(tokenized_sents)):
for j in range (0,len(tokenized_sents[i])):
tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents[i][j])
for i in range (0, len(tokenized_sents)):
f[i] = " ".join(tokenized_sents[i])
return f
def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features=500, binary = False, do_normalization = False, stopwords = False, verbose = True, analyzer_char = False):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
analyzer_type = 'word'
if analyzer_char:
analyzer_type = 'char'
if binary:
vectorizer = CountVectorizer(ngram_range = (min_ngrams , max_ngrams), binary =True)
elif stopwords:
vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),stop_words='english',analyzer=analyzer_type,sublinear_tf=True)
else:
vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),sublinear_tf=True,analyzer=analyzer_type)
if verbose:
print ("extracting ngrams... where n is [%d,%d]" % (max_ngrams,min_ngrams))
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = no_of_features
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print ("Extracting best features by a chi-squared test.. ", X_train.shape, X_test.shape )
return X_train, y, X_test
def skipGrams(data, labels, ntrain,nm=500,min_ngrams=1, max_ngrams=1, no_of_features=500, do_normalization = False, verbose = True):
f = data
if do_normalization:
f = normalize(f)
ftrain = f[:ntrain]
ftest = f[ntrain:]
y_train = labels[:ntrain]
t0 = time()
skipper = functools.partial(skipgrams, n=2, k=3)
vectorizer = TfidfVectorizer(sublinear_tf=True,analyzer=skipper)
X_train = vectorizer.fit_transform(ftrain)
X_test = vectorizer.transform(ftest)
if verbose:
print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape)
y = array(y_train)
numFts = nm
if numFts < X_train.shape[1]:
t0 = time()
ch2 = SelectKBest(chi2, k=numFts)
X_train = ch2.fit_transform(X_train, y)
X_test = ch2.transform(X_test)
assert sp.issparse(X_train)
if verbose:
print ("Extracting best features by a chi-squared test.. ", X_train.shape, X_test.shape)
return X_train, y, X_test
def specialCases(data, labels, ntrain, verbose = True):
g = [x.lower().replace("you are"," SSS ").replace("you're"," SSS ").replace(" ur ", " SSS ").split("SSS")[1:] for x in data]
f = []
for x in g:
fts = " "
x = normalize(x)
for y in x:
w = y.strip().replace("?",".").split(".")
fts = fts + " " + w[0]
f.append(fts)
X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100, do_normalization = True, verbose = verbose)
return X_trn, y_trn, X_tst
def loadBW():
f = open(BADWORDS_FILE, "r")
bwMap = dict()
for line in f:
sp = line.strip().lower().split(",")
if len(sp) == 2:
bwMap[sp[0].strip()] = sp[1].strip()
return bwMap
def readCsv(fname, skipFirst=True, delimiter = ","):
reader = csv.reader(open(fname),delimiter=delimiter)
rows = []
count = 1
for row in reader:
if not skipFirst or count > 1:
rows.append(row)
count += 1
return rows
def write_submission(x,filename):
wtr = open(filename,"w")
for i in range(len(x)):
wtr.write(format(x[i],"0.10f"))
wtr.write("\n")
wtr.close()
def run(verbose = True):
t0 = time()
train_data = readCsv(TRAIN_FILE)
train2_data = readCsv(TEST_SOL_FILE)
train_data = train_data + train2_data
# print(train_data)
labels = array([int(x[0]) for x in train_data])
# print(labels)
train = [x[2] for x in train_data]
test_data = readCsv(TEST_FILE)
test_data = [x[2] for x in test_data]
data = train + test_data
n = len(data)
ntrain = len(train)
X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)
"""
X_train1, y_train, X_test1 = ngrams(data, labels, ntrain, 1, 1, 2000, do_normalization = True, verbose = verbose)
X_train2, y_train, X_test2 = ngrams(data, labels, ntrain, 2, 2, 4000, do_normalization = True, verbose = verbose)
X_train3, y_train, X_test3 = ngrams(data, labels, ntrain, 3, 3, 100, do_normalization = True, verbose = verbose)
X_train4, y_train, X_test4 = ngrams(data, labels, ntrain, 4, 4, 1000, do_normalization = True, verbose = verbose, analyzer_char = True)
X_train5, y_train, X_test5 = ngrams(data, labels, ntrain, 5, 5, 1000, do_normalization = True, verbose = verbose, analyzer_char = True)
X_train6, y_train, X_test6 = ngrams(data, labels, ntrain, 3, 3, 2000, do_normalization = True, verbose = verbose, analyzer_char = True)
X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)
X_train8, y_train, X_test8 = skipGrams(data, labels, ntrain, verbose = verbose)
X_tn = sp.hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8])
X_tt = sp.hstack([X_test1, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8])
if verbose:
print "######## Total time for feature extraction: %fs" % (time() - t0), X_tn.shape, X_tt.shape
predictions = runClassifiers(X_tn, labels, X_tt)
write_submission(predictions, PREDICTION_FILE)
print "Predictions written to:", PREDICTION_FILE
"""
run()
# some code for n-grams (use TfidfVectorizer)
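# Illustrative sketch (not part of the original script): a minimal, self-contained
# example of the word n-gram extraction that ngrams() performs internally, using the
# TfidfVectorizer imported above on a toy corpus. All names below are hypothetical.
def _tfidf_ngram_example():
    docs = ["you are great", "you are not great at all"]
    vec = TfidfVectorizer(ngram_range=(1, 2), sublinear_tf=True)
    return vec.fit_transform(docs)  # sparse matrix of unigram + bigram tf-idf weights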
|
flexible
|
{
"blob_id": "91eb0ae8e59f24aeefdabd46546bc8fb7a0b6f6c",
"index": 3833,
"step-1": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... 
where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\n<mask token>\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... 
where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... 
where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\nrun()\n",
"step-4": "<mask token>\nDIR_PATH = ''\nTRAIN_FILE = DIR_PATH + 'train.csv'\nTEST_SOL_FILE = DIR_PATH + 'test_with_solutions.csv'\nBADWORDS_FILE = DIR_PATH + 'bad_words.txt'\nTEST_FILE = DIR_PATH + 'test.csv'\nPREDICTION_FILE = DIR_PATH + 'preds.csv'\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... 
where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\nrun()\n",
"step-5": "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn import metrics, ensemble, linear_model, svm\nfrom numpy import log, ones, array, zeros, mean, std, repeat\nimport numpy as np\nimport scipy.sparse as sp\nimport re\nimport csv\nfrom time import time\nimport functools\nfrom nltk.util import skipgrams\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n\n\nDIR_PATH = \"\"\n\nTRAIN_FILE = DIR_PATH + \"train.csv\"\nTEST_SOL_FILE = DIR_PATH + \"test_with_solutions.csv\" # This is also used for training, together with TRAIN_FILE\nBADWORDS_FILE = DIR_PATH + \"bad_words.txt\" # attached with submission \n\nTEST_FILE = DIR_PATH + \"test.csv\" # set this to the new test file name\nPREDICTION_FILE = DIR_PATH + \"preds.csv\" # predictions will be written here \n\ndef normalize(f , lammatize= False):\n f = [x.lower() for x in f]\n f = [x.replace(\"\\\\n\",\" \") for x in f] \n f = [x.replace(\"\\\\t\",\" \") for x in f] \n f = [x.replace(\"\\\\xa0\",\" \") for x in f]\n f = [x.replace(\"\\\\xc2\",\" \") for x in f]\n\n #f = [x.replace(\",\",\" \").replace(\".\",\" \").replace(\" \", \" \") for x in f]\n #f = [re.subn(\" ([a-z]) \",\"\\\\1\", x)[0] for x in f] \n #f = [x.replace(\" \",\" \") for x in f]\n\n f = [x.replace(\" u \",\" you \") for x in f]\n f = [x.replace(\" em \",\" them \") for x in f]\n f = [x.replace(\" da \",\" the \") for x in f]\n f = [x.replace(\" yo \",\" you \") for x in f]\n f = [x.replace(\" ur \",\" you \") for x in f]\n #f = [x.replace(\" ur \",\" your \") for x in f]\n #f = [x.replace(\" ur \",\" you're \") for x in f]\n \n f = [x.replace(\"won't\", \"will not\") for x in f]\n f = [x.replace(\"can't\", \"cannot\") for x in f]\n f = [x.replace(\"i'm\", \"i am\") for x in f]\n f = [x.replace(\" im \", \" i am \") for x in f]\n f = [x.replace(\"ain't\", \"is not\") for x in f]\n f = [x.replace(\"'ll\", \" will\") for x in f]\n f = [x.replace(\"'t\", \" not\") for x in f]\n f = [x.replace(\"'ve\", \" have\") for x in f]\n f = [x.replace(\"'s\", \" is\") for x in f]\n f = [x.replace(\"'re\", \" are\") for x in f]\n f = [x.replace(\"'d\", \" would\") for x in f]\n\n #f = [x.replace(\"outta\", \"out of\") for x in f]\n\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = \" \" + key + \" \"\n vpad = \" \" + value + \" \"\n f = [x.replace(kpad, vpad) for x in f]\n \n # stemming \n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range (0, len(tokenized_sents)):\n for j in range (0,len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range (0, len(tokenized_sents)):\n for j in range (0,len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents[i][j]) \n for i in range (0, 
len(tokenized_sents)):\n f[i] = \" \".join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features=500, binary = False, do_normalization = False, stopwords = False, verbose = True, analyzer_char = False):\n f = data\n if do_normalization:\n f = normalize(f)\n \n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n \n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n \n if binary:\n vectorizer = CountVectorizer(ngram_range = (min_ngrams , max_ngrams), binary =True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),stop_words='english',analyzer=analyzer_type,sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),sublinear_tf=True,analyzer=analyzer_type)\n\n if verbose:\n print (\"extracting ngrams... where n is [%d,%d]\" % (max_ngrams,min_ngrams))\n \n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n \n if verbose:\n print (\"done in %fs\" % (time() - t0), X_train.shape, X_test.shape)\n\n y = array(y_train) \n \n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train) \n\n if verbose:\n print (\"Extracting best features by a chi-squared test.. \", X_train.shape, X_test.shape ) \n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain,nm=500,min_ngrams=1, max_ngrams=1, no_of_features=500, do_normalization = False, verbose = True):\n f = data\n if do_normalization:\n f = normalize(f)\n \n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n \n vectorizer = TfidfVectorizer(sublinear_tf=True,analyzer=skipper)\n \n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n \n if verbose:\n print (\"done in %fs\" % (time() - t0), X_train.shape, X_test.shape)\n\n y = array(y_train) \n \n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train) \n if verbose:\n print (\"Extracting best features by a chi-squared test.. 
\", X_train.shape, X_test.shape) \n return X_train, y, X_test\n\n\n\ndef specialCases(data, labels, ntrain, verbose = True):\n g = [x.lower().replace(\"you are\",\" SSS \").replace(\"you're\",\" SSS \").replace(\" ur \", \" SSS \").split(\"SSS\")[1:] for x in data]\n\n f = []\n for x in g:\n fts = \" \"\n x = normalize(x)\n for y in x:\n w = y.strip().replace(\"?\",\".\").split(\".\")\n fts = fts + \" \" + w[0] \n f.append(fts)\n \n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100, do_normalization = True, verbose = verbose)\n return X_trn, y_trn, X_tst\n\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, \"r\")\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(\",\")\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n \n\ndef readCsv(fname, skipFirst=True, delimiter = \",\"):\n reader = csv.reader(open(fname),delimiter=delimiter)\n \n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1: \n rows.append(row)\n count += 1\n return rows\n\ndef write_submission(x,filename):\n wtr = open(filename,\"w\")\n for i in range(len(x)):\n wtr.write(format(x[i],\"0.10f\"))\n wtr.write(\"\\n\")\n wtr.close()\n\ndef run(verbose = True):\n t0 = time()\n\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n \n train_data = train_data + train2_data\n # print(train_data)\n labels = array([int(x[0]) for x in train_data])\n # print(labels) \n train = [x[2] for x in train_data]\n\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data] \n \n data = train + test_data\n\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)\n \n\"\"\"\n X_train1, y_train, X_test1 = ngrams(data, labels, ntrain, 1, 1, 2000, do_normalization = True, verbose = verbose)\n \n X_train2, y_train, X_test2 = ngrams(data, labels, ntrain, 2, 2, 4000, do_normalization = True, verbose = verbose)\n X_train3, y_train, X_test3 = ngrams(data, labels, ntrain, 3, 3, 100, do_normalization = True, verbose = verbose) \n X_train4, y_train, X_test4 = ngrams(data, labels, ntrain, 4, 4, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) \n X_train5, y_train, X_test5 = ngrams(data, labels, ntrain, 5, 5, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) \n X_train6, y_train, X_test6 = ngrams(data, labels, ntrain, 3, 3, 2000, do_normalization = True, verbose = verbose, analyzer_char = True) \n\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)\n X_train8, y_train, X_test8 = skipGrams(data, labels, ntrain, verbose = verbose)\n\n X_tn = sp.hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8])\n X_tt = sp.hstack([X_test1, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8])\n \n if verbose:\n print \"######## Total time for feature extraction: %fs\" % (time() - t0), X_tn.shape, X_tt.shape\n \n predictions = runClassifiers(X_tn, labels, X_tt)\n \n write_submission(predictions, PREDICTION_FILE) \n print \"Predictions written to:\", PREDICTION_FILE\n\"\"\"\n\nrun()\n#some code for n grams (use tdifvectorizer)\n\n\n\n\n\n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
from zeus import auth, factories
from zeus.constants import Result, Status
from zeus.models import FailureReason
from zeus.tasks import aggregate_build_stats_for_job
def test_unfinished_job(mocker, db_session, default_source):
auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.
repository_id]))
build = factories.BuildFactory(source=default_source, queued=True)
db_session.add(build)
job = factories.JobFactory(build=build, in_progress=True)
db_session.add(job)
aggregate_build_stats_for_job(job.id)
assert build.status == Status.in_progress
assert build.result == Result.unknown
def test_finished_job(mocker, db_session, default_source):
auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.
repository_id]))
build = factories.BuildFactory(source=default_source, in_progress=True)
db_session.add(build)
job = factories.JobFactory(build=build, failed=True)
db_session.add(job)
aggregate_build_stats_for_job(job.id)
assert build.status == Status.finished
assert build.result == Result.failed
def test_failing_tests(mocker, db_session, default_source):
auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.
repository_id]))
build = factories.BuildFactory(source=default_source, in_progress=True)
db_session.add(build)
job = factories.JobFactory(build=build, passed=True)
db_session.add(job)
factories.TestCaseFactory(job=job, failed=True)
aggregate_build_stats_for_job(job.id)
assert job.result == Result.failed
reasons = list(FailureReason.query.filter(FailureReason.job_id == job.id))
assert len(reasons) == 1
assert reasons[0].reason == FailureReason.Code.failing_tests
|
normal
|
{
"blob_id": "71b78b1347456420c3fc29605887d20ba5bff06e",
"index": 4313,
"step-1": "<mask token>\n\n\ndef test_unfinished_job(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, queued=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, in_progress=True)\n db_session.add(job)\n aggregate_build_stats_for_job(job.id)\n assert build.status == Status.in_progress\n assert build.result == Result.unknown\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_unfinished_job(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, queued=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, in_progress=True)\n db_session.add(job)\n aggregate_build_stats_for_job(job.id)\n assert build.status == Status.in_progress\n assert build.result == Result.unknown\n\n\n<mask token>\n\n\ndef test_failing_tests(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, in_progress=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, passed=True)\n db_session.add(job)\n factories.TestCaseFactory(job=job, failed=True)\n aggregate_build_stats_for_job(job.id)\n assert job.result == Result.failed\n reasons = list(FailureReason.query.filter(FailureReason.job_id == job.id))\n assert len(reasons) == 1\n assert reasons[0].reason == FailureReason.Code.failing_tests\n",
"step-3": "<mask token>\n\n\ndef test_unfinished_job(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, queued=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, in_progress=True)\n db_session.add(job)\n aggregate_build_stats_for_job(job.id)\n assert build.status == Status.in_progress\n assert build.result == Result.unknown\n\n\ndef test_finished_job(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, in_progress=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, failed=True)\n db_session.add(job)\n aggregate_build_stats_for_job(job.id)\n assert build.status == Status.finished\n assert build.result == Result.failed\n\n\ndef test_failing_tests(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, in_progress=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, passed=True)\n db_session.add(job)\n factories.TestCaseFactory(job=job, failed=True)\n aggregate_build_stats_for_job(job.id)\n assert job.result == Result.failed\n reasons = list(FailureReason.query.filter(FailureReason.job_id == job.id))\n assert len(reasons) == 1\n assert reasons[0].reason == FailureReason.Code.failing_tests\n",
"step-4": "from zeus import auth, factories\nfrom zeus.constants import Result, Status\nfrom zeus.models import FailureReason\nfrom zeus.tasks import aggregate_build_stats_for_job\n\n\ndef test_unfinished_job(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, queued=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, in_progress=True)\n db_session.add(job)\n aggregate_build_stats_for_job(job.id)\n assert build.status == Status.in_progress\n assert build.result == Result.unknown\n\n\ndef test_finished_job(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, in_progress=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, failed=True)\n db_session.add(job)\n aggregate_build_stats_for_job(job.id)\n assert build.status == Status.finished\n assert build.result == Result.failed\n\n\ndef test_failing_tests(mocker, db_session, default_source):\n auth.set_current_tenant(auth.Tenant(repository_ids=[default_source.\n repository_id]))\n build = factories.BuildFactory(source=default_source, in_progress=True)\n db_session.add(build)\n job = factories.JobFactory(build=build, passed=True)\n db_session.add(job)\n factories.TestCaseFactory(job=job, failed=True)\n aggregate_build_stats_for_job(job.id)\n assert job.result == Result.failed\n reasons = list(FailureReason.query.filter(FailureReason.job_id == job.id))\n assert len(reasons) == 1\n assert reasons[0].reason == FailureReason.Code.failing_tests\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import pandas as pd;
import time;
import matplotlib.pyplot as plt;
import matplotlib.cm as cm
import matplotlib.patches as mpatch;
import numpy as np;
import sys;
sys.path.append("/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test")
import bettersankey as bsk;
datapath = "/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/"
print("reading...")
acttable = pd.read_csv(datapath + "atusact_2015/atusact_2015.dat")
infotable = pd.read_csv(datapath + "atusresp_2015/atusresp_2015.dat")
print("joining...")
jointable = pd.merge(acttable,infotable,on='TUCASEID')
#tiermode='TRTIER2'
tiermode='TRCODE'
#columns=['case','day','hour','origin','dest','corigin','cdest']
trans = pd.DataFrame();
print("processing...")
trans['case'] = jointable['TUCASEID']
trans['caseshift'] = jointable['TUCASEID'].shift(-1)
trans['step'] = jointable['TUACTIVITY_N']
trans['day'] = jointable['TUDIARYDAY']
trans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x/60.0))
trans['origin'] = jointable[tiermode]
trans['dest'] = jointable[tiermode].shift(-1)
trans['corigin'] = jointable.apply((lambda x: (x['TUCC5'] == 1) or (x['TUCC5B'] == 1) or (x['TUCC7'] == 1) or (x['TUCC8'] == 1)),axis=1)
trans['cdest'] = trans['corigin'].shift(-1)
trans = trans[trans.caseshift.notnull()]
trans['caseshift'] = trans['caseshift'].apply(lambda x:int(x))
trans['dest'] = trans['dest'].apply(lambda x:int(x))
trans = trans[trans.case == trans.caseshift]
trans.drop('caseshift',axis=1,inplace =True)
trans.to_csv(datapath + "transitions.csv");
print(len(set(trans['dest'])));
s = trans.groupby(['origin','dest']).size()
# s.to_csv(datapath + "transitioncounts.csv")
print("plotting...")
v = s.unstack().as_matrix();
v[np.isnan(v)] = 0.0;
logv = np.log10(v);
logv[np.isneginf(logv)] = 0.0;
print("Max value:", np.max(v), " (",np.max(logv),")")
plt.pcolormesh(logv,cmap='Blues');
plt.colorbar();
plt.yticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0])
plt.xticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0],rotation=45);
plt.show()
exit();
|
normal
|
{
"blob_id": "07b6ded9b4841bdba62d481664a399f0b125fcbf",
"index": 7876,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append('/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test')\n<mask token>\nprint('reading...')\n<mask token>\nprint('joining...')\n<mask token>\nprint('processing...')\n<mask token>\ntrans.drop('caseshift', axis=1, inplace=True)\ntrans.to_csv(datapath + 'transitions.csv')\nprint(len(set(trans['dest'])))\n<mask token>\nprint('plotting...')\n<mask token>\nprint('Max value:', np.max(v), ' (', np.max(logv), ')')\nplt.pcolormesh(logv, cmap='Blues')\nplt.colorbar()\nplt.yticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0])\nplt.xticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0],\n rotation=45)\nplt.show()\nexit()\n",
"step-3": "<mask token>\nsys.path.append('/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test')\n<mask token>\ndatapath = '/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/'\nprint('reading...')\nacttable = pd.read_csv(datapath + 'atusact_2015/atusact_2015.dat')\ninfotable = pd.read_csv(datapath + 'atusresp_2015/atusresp_2015.dat')\nprint('joining...')\njointable = pd.merge(acttable, infotable, on='TUCASEID')\ntiermode = 'TRCODE'\ntrans = pd.DataFrame()\nprint('processing...')\ntrans['case'] = jointable['TUCASEID']\ntrans['caseshift'] = jointable['TUCASEID'].shift(-1)\ntrans['step'] = jointable['TUACTIVITY_N']\ntrans['day'] = jointable['TUDIARYDAY']\ntrans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x / 60.0))\ntrans['origin'] = jointable[tiermode]\ntrans['dest'] = jointable[tiermode].shift(-1)\ntrans['corigin'] = jointable.apply(lambda x: x['TUCC5'] == 1 or x['TUCC5B'] ==\n 1 or x['TUCC7'] == 1 or x['TUCC8'] == 1, axis=1)\ntrans['cdest'] = trans['corigin'].shift(-1)\ntrans = trans[trans.caseshift.notnull()]\ntrans['caseshift'] = trans['caseshift'].apply(lambda x: int(x))\ntrans['dest'] = trans['dest'].apply(lambda x: int(x))\ntrans = trans[trans.case == trans.caseshift]\ntrans.drop('caseshift', axis=1, inplace=True)\ntrans.to_csv(datapath + 'transitions.csv')\nprint(len(set(trans['dest'])))\ns = trans.groupby(['origin', 'dest']).size()\nprint('plotting...')\nv = s.unstack().as_matrix()\nv[np.isnan(v)] = 0.0\nlogv = np.log10(v)\nlogv[np.isneginf(logv)] = 0.0\nprint('Max value:', np.max(v), ' (', np.max(logv), ')')\nplt.pcolormesh(logv, cmap='Blues')\nplt.colorbar()\nplt.yticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0])\nplt.xticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0],\n rotation=45)\nplt.show()\nexit()\n",
"step-4": "import pandas as pd\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport matplotlib.patches as mpatch\nimport numpy as np\nimport sys\nsys.path.append('/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test')\nimport bettersankey as bsk\ndatapath = '/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/'\nprint('reading...')\nacttable = pd.read_csv(datapath + 'atusact_2015/atusact_2015.dat')\ninfotable = pd.read_csv(datapath + 'atusresp_2015/atusresp_2015.dat')\nprint('joining...')\njointable = pd.merge(acttable, infotable, on='TUCASEID')\ntiermode = 'TRCODE'\ntrans = pd.DataFrame()\nprint('processing...')\ntrans['case'] = jointable['TUCASEID']\ntrans['caseshift'] = jointable['TUCASEID'].shift(-1)\ntrans['step'] = jointable['TUACTIVITY_N']\ntrans['day'] = jointable['TUDIARYDAY']\ntrans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x / 60.0))\ntrans['origin'] = jointable[tiermode]\ntrans['dest'] = jointable[tiermode].shift(-1)\ntrans['corigin'] = jointable.apply(lambda x: x['TUCC5'] == 1 or x['TUCC5B'] ==\n 1 or x['TUCC7'] == 1 or x['TUCC8'] == 1, axis=1)\ntrans['cdest'] = trans['corigin'].shift(-1)\ntrans = trans[trans.caseshift.notnull()]\ntrans['caseshift'] = trans['caseshift'].apply(lambda x: int(x))\ntrans['dest'] = trans['dest'].apply(lambda x: int(x))\ntrans = trans[trans.case == trans.caseshift]\ntrans.drop('caseshift', axis=1, inplace=True)\ntrans.to_csv(datapath + 'transitions.csv')\nprint(len(set(trans['dest'])))\ns = trans.groupby(['origin', 'dest']).size()\nprint('plotting...')\nv = s.unstack().as_matrix()\nv[np.isnan(v)] = 0.0\nlogv = np.log10(v)\nlogv[np.isneginf(logv)] = 0.0\nprint('Max value:', np.max(v), ' (', np.max(logv), ')')\nplt.pcolormesh(logv, cmap='Blues')\nplt.colorbar()\nplt.yticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0])\nplt.xticks(np.arange(0, len(s.index.levels[0]), 1), s.index.levels[0],\n rotation=45)\nplt.show()\nexit()\n",
"step-5": "import pandas as pd;\nimport time;\nimport matplotlib.pyplot as plt;\nimport matplotlib.cm as cm\nimport matplotlib.patches as mpatch;\nimport numpy as np;\nimport sys;\n\nsys.path.append(\"/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/test\")\nimport bettersankey as bsk;\n\n\ndatapath = \"/uufs/chpc.utah.edu/common/home/u0403692/prog/prism/data/timeuse/\"\n\nprint(\"reading...\")\nacttable = pd.read_csv(datapath + \"atusact_2015/atusact_2015.dat\")\ninfotable = pd.read_csv(datapath + \"atusresp_2015/atusresp_2015.dat\")\nprint(\"joining...\")\njointable = pd.merge(acttable,infotable,on='TUCASEID')\n\n#tiermode='TRTIER2'\ntiermode='TRCODE'\n\n#columns=['case','day','hour','origin','dest','corigin','cdest']\ntrans = pd.DataFrame();\n\nprint(\"processing...\")\n\ntrans['case'] = jointable['TUCASEID']\ntrans['caseshift'] = jointable['TUCASEID'].shift(-1)\ntrans['step'] = jointable['TUACTIVITY_N']\ntrans['day'] = jointable['TUDIARYDAY']\ntrans['hour'] = jointable['TUCUMDUR24'].apply(lambda x: np.floor(x/60.0))\ntrans['origin'] = jointable[tiermode]\ntrans['dest'] = jointable[tiermode].shift(-1)\ntrans['corigin'] = jointable.apply((lambda x: (x['TUCC5'] == 1) or (x['TUCC5B'] == 1) or (x['TUCC7'] == 1) or (x['TUCC8'] == 1)),axis=1)\ntrans['cdest'] = trans['corigin'].shift(-1)\n\ntrans = trans[trans.caseshift.notnull()]\n\ntrans['caseshift'] = trans['caseshift'].apply(lambda x:int(x))\ntrans['dest'] = trans['dest'].apply(lambda x:int(x))\n\ntrans = trans[trans.case == trans.caseshift]\ntrans.drop('caseshift',axis=1,inplace =True)\n\ntrans.to_csv(datapath + \"transitions.csv\");\n\nprint(len(set(trans['dest'])));\n\ns = trans.groupby(['origin','dest']).size()\n\n# s.to_csv(datapath + \"transitioncounts.csv\")\n\nprint(\"plotting...\")\n\nv = s.unstack().as_matrix();\nv[np.isnan(v)] = 0.0;\nlogv = np.log10(v);\nlogv[np.isneginf(logv)] = 0.0;\n\nprint(\"Max value:\", np.max(v), \" (\",np.max(logv),\")\")\n\nplt.pcolormesh(logv,cmap='Blues');\nplt.colorbar();\nplt.yticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0])\nplt.xticks(np.arange(0,len(s.index.levels[0]),1),s.index.levels[0],rotation=45);\n\nplt.show()\n\nexit();\n\t\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def map_density(ax, syst, psi_sqrd, colormap='Reds'):
kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,
vmax=0.99 * max(psi_sqrd))
tools.edit_axis(ax, 'dens')
return 0
def density_in_line(syst, states, Op=np.eye(6)):
y_stack = []
def line(site):
x, y = site.pos
half = 0
delta = shapes.A_STD
ans = abs(x - half) < delta
if ans == True:
y_stack.append(y)
return ans
rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)
dos_line = np.array([rho_line(p) for p in states])
return dos_line, np.array(y_stack)
def plot_dos_in_line(dos_line):
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(dos_line[0], color='red')
ax[1].plot(dos_line[1], color='blue')
plt.tight_layout()
plt.show()
<|reserved_special_token_0|>
def print_info_dos_line(y_values, dos_in_line):
print(80 * '=')
print('Size of dos_both: ', dos_in_line.shape)
print('Size of y_both: ', y_values.shape)
print('y_both:\n', y_values)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def map_density(ax, syst, psi_sqrd, colormap='Reds'):
kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,
vmax=0.99 * max(psi_sqrd))
tools.edit_axis(ax, 'dens')
return 0
def density_in_line(syst, states, Op=np.eye(6)):
y_stack = []
def line(site):
x, y = site.pos
half = 0
delta = shapes.A_STD
ans = abs(x - half) < delta
if ans == True:
y_stack.append(y)
return ans
rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)
dos_line = np.array([rho_line(p) for p in states])
return dos_line, np.array(y_stack)
def plot_dos_in_line(dos_line):
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(dos_line[0], color='red')
ax[1].plot(dos_line[1], color='blue')
plt.tight_layout()
plt.show()
<|reserved_special_token_0|>
def print_info_dos_line(y_values, dos_in_line):
print(80 * '=')
print('Size of dos_both: ', dos_in_line.shape)
print('Size of y_both: ', y_values.shape)
print('y_both:\n', y_values)
def main():
hamiltonian = gasb.hamiltonian_97_k_plus()
lead_ham = gasb.free_ham(6)
centralShape = shapes.Rect()
syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)
energia = 448
parametros = gasb.params_97
parametros['Eta3'] = 0
parametros['Eta2'] = 0
parametros['eF'] = 60
parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)
wf = kwant.wave_function(syst, energy=energia, params=parametros)
modes_left = wf(0)
modes_right = wf(1)
sigma_z = tinyarray.array([[1, 0], [0, -1]])
spin_proj = np.kron(sigma_z, np.eye(3))
identity = np.eye(6)
rho = kwant.operator.Density(syst, spin_proj)
psi_left = sum(rho(p) for p in modes_left)
psi_right = sum(rho(p) for p in modes_right)
dos_in_line_from_left = density_in_line(syst, psi)
dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))
plt.plot(sum(dos_in_line_from_both))
plt.show()
plot_dos_in_line(dos_in_line_from_left)
print(sum(dos_in_line_from_both).shape)
colorRight = 'seismic'
colorLeft = 'seismic'
fig, ax = plt.subplots(2, 2, figsize=(14, 6))
y_values_left = y_values_left * (shapes.A0 / 10)
y_values_right = y_values_right * (shapes.A0 / 10)
min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD
map_density(ax[0][0], syst, psi_left, colormap=colorRight)
ax[0][0].vlines(0, min_line, max_line, linestyle='--')
ax[0][0].set_title('left lead')
map_density(ax[1][0], syst, psi_right, colormap=colorLeft)
ax[1][0].vlines(0, min_line, max_line, linestyle='--')
ax[1][0].set_title('right lead')
ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=
'.', markersize=2.5, linestyle='-')
ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker
='.', markersize=2.5, linestyle='-')
plt.tight_layout()
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def map_density(ax, syst, psi_sqrd, colormap='Reds'):
kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,
vmax=0.99 * max(psi_sqrd))
tools.edit_axis(ax, 'dens')
return 0
def density_in_line(syst, states, Op=np.eye(6)):
y_stack = []
def line(site):
x, y = site.pos
half = 0
delta = shapes.A_STD
ans = abs(x - half) < delta
if ans == True:
y_stack.append(y)
return ans
rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)
dos_line = np.array([rho_line(p) for p in states])
return dos_line, np.array(y_stack)
def plot_dos_in_line(dos_line):
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(dos_line[0], color='red')
ax[1].plot(dos_line[1], color='blue')
plt.tight_layout()
plt.show()
def normalize(dos_in_line):
return sum(dos_in_line) / max(sum(dos_in_line))
def print_info_dos_line(y_values, dos_in_line):
print(80 * '=')
print('Size of dos_both: ', dos_in_line.shape)
print('Size of y_both: ', y_values.shape)
print('y_both:\n', y_values)
def main():
hamiltonian = gasb.hamiltonian_97_k_plus()
lead_ham = gasb.free_ham(6)
centralShape = shapes.Rect()
syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)
energia = 448
parametros = gasb.params_97
parametros['Eta3'] = 0
parametros['Eta2'] = 0
parametros['eF'] = 60
parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)
wf = kwant.wave_function(syst, energy=energia, params=parametros)
modes_left = wf(0)
modes_right = wf(1)
sigma_z = tinyarray.array([[1, 0], [0, -1]])
spin_proj = np.kron(sigma_z, np.eye(3))
identity = np.eye(6)
rho = kwant.operator.Density(syst, spin_proj)
psi_left = sum(rho(p) for p in modes_left)
psi_right = sum(rho(p) for p in modes_right)
    dos_in_line_from_left, y_values_left = density_in_line(syst, modes_left)
    dos_in_line_from_right, y_values_right = density_in_line(syst, modes_right)
    dos_in_line_from_both, y_values_both = density_in_line(syst, np.vstack((wf(0), wf(1))))
plt.plot(sum(dos_in_line_from_both))
plt.show()
plot_dos_in_line(dos_in_line_from_left)
print(sum(dos_in_line_from_both).shape)
colorRight = 'seismic'
colorLeft = 'seismic'
fig, ax = plt.subplots(2, 2, figsize=(14, 6))
y_values_left = y_values_left * (shapes.A0 / 10)
y_values_right = y_values_right * (shapes.A0 / 10)
min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD
map_density(ax[0][0], syst, psi_left, colormap=colorRight)
ax[0][0].vlines(0, min_line, max_line, linestyle='--')
ax[0][0].set_title('left lead')
map_density(ax[1][0], syst, psi_right, colormap=colorLeft)
ax[1][0].vlines(0, min_line, max_line, linestyle='--')
ax[1][0].set_title('right lead')
ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=
'.', markersize=2.5, linestyle='-')
ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker
='.', markersize=2.5, linestyle='-')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import kwant
import tinyarray
from hamiltonians import gasb_hamiltonian as gasb
from system_geometry import shapes
from transport_tools import bands_and_currents as tools
def map_density(ax, syst, psi_sqrd, colormap='Reds'):
kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,
vmax=0.99 * max(psi_sqrd))
tools.edit_axis(ax, 'dens')
return 0
def density_in_line(syst, states, Op=np.eye(6)):
y_stack = []
def line(site):
x, y = site.pos
half = 0
delta = shapes.A_STD
ans = abs(x - half) < delta
if ans == True:
y_stack.append(y)
return ans
rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)
dos_line = np.array([rho_line(p) for p in states])
return dos_line, np.array(y_stack)
def plot_dos_in_line(dos_line):
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].plot(dos_line[0], color='red')
ax[1].plot(dos_line[1], color='blue')
plt.tight_layout()
plt.show()
def normalize(dos_in_line):
return sum(dos_in_line) / max(sum(dos_in_line))
def print_info_dos_line(y_values, dos_in_line):
print(80 * '=')
print('Size of dos_both: ', dos_in_line.shape)
print('Size of y_both: ', y_values.shape)
print('y_both:\n', y_values)
def main():
hamiltonian = gasb.hamiltonian_97_k_plus()
lead_ham = gasb.free_ham(6)
centralShape = shapes.Rect()
syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)
energia = 448
parametros = gasb.params_97
parametros['Eta3'] = 0
parametros['Eta2'] = 0
parametros['eF'] = 60
parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)
wf = kwant.wave_function(syst, energy=energia, params=parametros)
modes_left = wf(0)
modes_right = wf(1)
sigma_z = tinyarray.array([[1, 0], [0, -1]])
spin_proj = np.kron(sigma_z, np.eye(3))
identity = np.eye(6)
rho = kwant.operator.Density(syst, spin_proj)
psi_left = sum(rho(p) for p in modes_left)
psi_right = sum(rho(p) for p in modes_right)
    dos_in_line_from_left, y_values_left = density_in_line(syst, modes_left)
    dos_in_line_from_right, y_values_right = density_in_line(syst, modes_right)
    dos_in_line_from_both, y_values_both = density_in_line(syst, np.vstack((wf(0), wf(1))))
plt.plot(sum(dos_in_line_from_both))
plt.show()
plot_dos_in_line(dos_in_line_from_left)
print(sum(dos_in_line_from_both).shape)
colorRight = 'seismic'
colorLeft = 'seismic'
fig, ax = plt.subplots(2, 2, figsize=(14, 6))
y_values_left = y_values_left * (shapes.A0 / 10)
y_values_right = y_values_right * (shapes.A0 / 10)
min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD
map_density(ax[0][0], syst, psi_left, colormap=colorRight)
ax[0][0].vlines(0, min_line, max_line, linestyle='--')
ax[0][0].set_title('left lead')
map_density(ax[1][0], syst, psi_right, colormap=colorLeft)
ax[1][0].vlines(0, min_line, max_line, linestyle='--')
ax[1][0].set_title('right lead')
ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=
'.', markersize=2.5, linestyle='-')
ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker
='.', markersize=2.5, linestyle='-')
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
'''
Script for analysis of wavefunctions on GaSb/InAs/GaSb symmetric quantum wells.
This piece code is part of the project "phd_gasb_inas", which comprises the work
related to the Phd. Dissertation named: "Quantum transport of charge and spin in
topological insulators 2D".
Author: Marcos Medeiros
email: [email protected]
'''
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Kwant related stuff
import kwant
import tinyarray
# local application imports
from hamiltonians import gasb_hamiltonian as gasb
from system_geometry import shapes
from transport_tools import bands_and_currents as tools
def map_density(ax, syst, psi_sqrd, colormap = "Reds"):
# Plot the results:
# print(max(psi_sqrd))
kwant.plotter.map(syst, psi_sqrd, ax = ax, fig_size = (7,3), cmap = colormap, vmax = 0.99*max(psi_sqrd))
tools.edit_axis(ax,'dens')
return 0
def density_in_line(syst, states, Op = np.eye(6)):
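    # Evaluate the density only on the column of sites with |x - 0| < shapes.A_STD;
    # returns the per-state density on that line and the y coordinate recorded for
    # each site the operator visits.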
y_stack = []
def line(site):
(x, y) = site.pos
half = 0
delta = shapes.A_STD
ans = abs(x - half) < delta
if ans == True : y_stack.append(y)
return ans
rho_line = kwant.operator.Density(syst, Op, where = line, sum = False)
dos_line = np.array([rho_line(p) for p in states])
return dos_line, np.array(y_stack)
def plot_dos_in_line(dos_line):
fig, ax = plt.subplots(1, 2, figsize = (10,5))
ax[0].plot(dos_line[0], color = 'red')
ax[1].plot(dos_line[1], color = 'blue')
plt.tight_layout()
plt.show()
def normalize(dos_in_line):
# return sum(dos_in_line)
return sum(dos_in_line)/max(sum(dos_in_line))
def print_info_dos_line(y_values, dos_in_line):
print(80*"=")
print("Size of dos_both: ", dos_in_line.shape)
print("Size of y_both: ", y_values.shape)
print("y_both:\n", y_values)
def main():
# Define the system:
hamiltonian = gasb.hamiltonian_97_k_plus()
# hamiltonian = gasb.hamiltonian_97_down()
lead_ham = gasb.free_ham(6)
centralShape = shapes.Rect()
syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)
# Calculate the wave_function:
energia = 448
parametros = gasb.params_97
parametros['Eta3'] = 0
parametros['Eta2'] = 0
parametros['eF'] = 60
parametros = dict(GammaLead = parametros["GammaC"], V = 100, **parametros )
wf = kwant.wave_function(syst, energy = energia, params = parametros)
modes_left = wf(0)
modes_right = wf(1)
# modes_total = np.vstack((wf(0), wf(1)))
# Calculate the density:
sigma_z = tinyarray.array([[1,0],[0,-1]])
spin_proj= np.kron(sigma_z, np.eye(3))
identity = np.eye(6)
rho = kwant.operator.Density(syst, spin_proj)
psi_left = sum(rho(p) for p in modes_left)
psi_right = sum(rho(p) for p in modes_right)
    # Calculate dos in a line; density_in_line returns (dos_array, y_coordinates)
    dos_in_line_from_left, y_values_left = density_in_line(syst, modes_left)
    dos_in_line_from_right, y_values_right = density_in_line(syst, modes_right)
    dos_in_line_from_both, y_values_both = density_in_line(syst, np.vstack((wf(0),wf(1))))
plt.plot(sum(dos_in_line_from_both))
plt.show()
# print(dos_in_line.shape)
# print(dos_in_line)
plot_dos_in_line(dos_in_line_from_left)
# plot_dos_in_line(dos_in_line_from_both)
print(sum(dos_in_line_from_both).shape)
# Plot the results:
colorRight = "seismic"
colorLeft = "seismic"
fig, ax = plt.subplots(2,2,figsize=(14,6))
y_values_left = y_values_left * (shapes.A0 / 10) # conversion to nm^{-1}
y_values_right = y_values_right * (shapes.A0 / 10) # conversion to nm^{-1}
min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD
map_density(ax[0][0], syst, psi_left, colormap = colorRight)
ax[0][0].vlines(0, min_line, max_line, linestyle = "--")
ax[0][0].set_title("left lead")
map_density(ax[1][0], syst, psi_right, colormap = colorLeft)
ax[1][0].vlines(0, min_line, max_line, linestyle = "--")
ax[1][0].set_title("right lead")
ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left),
marker = ".", markersize = 2.5, linestyle = "-" )
ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right),
marker = ".", markersize = 2.5, linestyle = "-" )
plt.tight_layout()
plt.show()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "a012055d11202c68d9eddf5cf2a17043f9bbaf0a",
"index": 6851,
"step-1": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\ndef normalize(dos_in_line):\n return sum(dos_in_line) / max(sum(dos_in_line))\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport kwant\nimport tinyarray\nfrom hamiltonians import gasb_hamiltonian as gasb\nfrom system_geometry import shapes\nfrom transport_tools import bands_and_currents as tools\n\n\ndef map_density(ax, syst, psi_sqrd, colormap='Reds'):\n kwant.plotter.map(syst, psi_sqrd, ax=ax, fig_size=(7, 3), cmap=colormap,\n vmax=0.99 * max(psi_sqrd))\n tools.edit_axis(ax, 'dens')\n return 0\n\n\ndef density_in_line(syst, states, Op=np.eye(6)):\n y_stack = []\n\n def line(site):\n x, y = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True:\n y_stack.append(y)\n return ans\n rho_line = kwant.operator.Density(syst, Op, where=line, sum=False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize=(10, 5))\n ax[0].plot(dos_line[0], color='red')\n ax[1].plot(dos_line[1], color='blue')\n plt.tight_layout()\n plt.show()\n\n\ndef normalize(dos_in_line):\n return sum(dos_in_line) / max(sum(dos_in_line))\n\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80 * '=')\n print('Size of dos_both: ', dos_in_line.shape)\n print('Size of y_both: ', y_values.shape)\n print('y_both:\\n', y_values)\n\n\ndef main():\n hamiltonian = gasb.hamiltonian_97_k_plus()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead=parametros['GammaC'], V=100, **parametros)\n wf = kwant.wave_function(syst, energy=energia, params=parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n sigma_z = tinyarray.array([[1, 0], [0, -1]])\n spin_proj = np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0), wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n plot_dos_in_line(dos_in_line_from_left)\n print(sum(dos_in_line_from_both).shape)\n colorRight = 'seismic'\n colorLeft = 'seismic'\n fig, ax = plt.subplots(2, 2, figsize=(14, 6))\n y_values_left = y_values_left * (shapes.A0 / 10)\n y_values_right = y_values_right * (shapes.A0 / 10)\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n map_density(ax[0][0], syst, psi_left, colormap=colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle='--')\n ax[0][0].set_title('left lead')\n map_density(ax[1][0], syst, psi_right, colormap=colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle='--')\n ax[1][0].set_title('right lead')\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left), marker=\n '.', markersize=2.5, linestyle='-')\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right), marker\n ='.', markersize=2.5, linestyle='-')\n plt.tight_layout()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n'''\nScript for analysis of wavefunctions on GaSb/InAs/GaSb simmetric quantum wells.\n\nThis piece code is part of the project \"phd_gasb_inas\", which comprises the work\nrelated to the Phd. Dissertation named: \"Quantum transport of charge and spin in\ntopological insulators 2D\".\n\nAuthor: Marcos Medeiros\nemail: [email protected]\n'''\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Kwant related stuff\nimport kwant\nimport tinyarray\n\n# local application imports\nfrom hamiltonians import gasb_hamiltonian as gasb\nfrom system_geometry import shapes\nfrom transport_tools import bands_and_currents as tools\n\n\ndef map_density(ax, syst, psi_sqrd, colormap = \"Reds\"):\n # Plot the results:\n # print(max(psi_sqrd))\n kwant.plotter.map(syst, psi_sqrd, ax = ax, fig_size = (7,3), cmap = colormap, vmax = 0.99*max(psi_sqrd))\n tools.edit_axis(ax,'dens')\n return 0\n\ndef density_in_line(syst, states, Op = np.eye(6)):\n y_stack = []\n def line(site):\n (x, y) = site.pos\n half = 0\n delta = shapes.A_STD\n ans = abs(x - half) < delta\n if ans == True : y_stack.append(y)\n return ans\n\n rho_line = kwant.operator.Density(syst, Op, where = line, sum = False)\n dos_line = np.array([rho_line(p) for p in states])\n return dos_line, np.array(y_stack)\n\ndef plot_dos_in_line(dos_line):\n fig, ax = plt.subplots(1, 2, figsize = (10,5))\n ax[0].plot(dos_line[0], color = 'red')\n ax[1].plot(dos_line[1], color = 'blue')\n plt.tight_layout()\n plt.show()\n\ndef normalize(dos_in_line):\n # return sum(dos_in_line)\n return sum(dos_in_line)/max(sum(dos_in_line))\n\ndef print_info_dos_line(y_values, dos_in_line):\n print(80*\"=\")\n print(\"Size of dos_both: \", dos_in_line.shape)\n print(\"Size of y_both: \", y_values.shape)\n print(\"y_both:\\n\", y_values)\n\n\n\ndef main():\n # Define the system:\n hamiltonian = gasb.hamiltonian_97_k_plus()\n # hamiltonian = gasb.hamiltonian_97_down()\n lead_ham = gasb.free_ham(6)\n centralShape = shapes.Rect()\n syst = gasb.system_builder(hamiltonian, lead_ham, centralShape)\n\n\n # Calculate the wave_function:\n energia = 448\n parametros = gasb.params_97\n parametros['Eta3'] = 0\n parametros['Eta2'] = 0\n parametros['eF'] = 60\n parametros = dict(GammaLead = parametros[\"GammaC\"], V = 100, **parametros )\n wf = kwant.wave_function(syst, energy = energia, params = parametros)\n modes_left = wf(0)\n modes_right = wf(1)\n # modes_total = np.vstack((wf(0), wf(1)))\n\n\n # Calculate the density:\n sigma_z = tinyarray.array([[1,0],[0,-1]])\n spin_proj= np.kron(sigma_z, np.eye(3))\n identity = np.eye(6)\n rho = kwant.operator.Density(syst, spin_proj)\n psi_left = sum(rho(p) for p in modes_left)\n psi_right = sum(rho(p) for p in modes_right)\n\n\n # Calculate dos in a line\n dos_in_line_from_left = density_in_line(syst, psi)\n dos_in_line_from_both = density_in_line(syst, np.vstack((wf(0),wf(1))))\n plt.plot(sum(dos_in_line_from_both))\n plt.show()\n # print(dos_in_line.shape)\n # print(dos_in_line)\n plot_dos_in_line(dos_in_line_from_left)\n # plot_dos_in_line(dos_in_line_from_both)\n print(sum(dos_in_line_from_both).shape)\n\n\n # Plot the results:\n colorRight = \"seismic\"\n colorLeft = \"seismic\"\n fig, ax = plt.subplots(2,2,figsize=(14,6))\n y_values_left = y_values_left * (shapes.A0 / 10) # conversion to nm^{-1}\n y_values_right = y_values_right * (shapes.A0 / 10) # conversion to nm^{-1}\n min_line, max_line = -0.7 * shapes.L_STD, 0.7 * shapes.L_STD\n\n map_density(ax[0][0], syst, psi_left, colormap = 
colorRight)\n ax[0][0].vlines(0, min_line, max_line, linestyle = \"--\")\n ax[0][0].set_title(\"left lead\")\n map_density(ax[1][0], syst, psi_right, colormap = colorLeft)\n ax[1][0].vlines(0, min_line, max_line, linestyle = \"--\")\n ax[1][0].set_title(\"right lead\")\n\n ax[0][1].plot(y_values_left, normalize(dos_in_line_from_left),\n marker = \".\", markersize = 2.5, linestyle = \"-\" )\n ax[1][1].plot(y_values_right, normalize(dos_in_line_from_right),\n marker = \".\", markersize = 2.5, linestyle = \"-\" )\n plt.tight_layout()\n plt.show()\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
import os, sys
import math
import argparse
import shutil
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold
from keras.models import Sequential
from keras.layers import Dense, Dropout, LocallyConnected1D, Activation, \
GaussianNoise, GaussianDropout
from keras.layers.normalization import BatchNormalization
from keras.wrappers.scikit_learn import KerasRegressor
from keras.utils import multi_gpu_model
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.models import load_model
from keras.callbacks import Callback
import timeit
import pickle
from openeye import oechem
from torsion.model import get_sf_elements
from torsion.analysis import get_dihedral_inchi_key
import matplotlib.pyplot as plt
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
def get_model(num_feat=294, lr=1e-3, drop_out=0.1, layer_dims=''):
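    # Fully connected regression network: each hidden width is a multiple of the
    # input dimension, parsed from the '-'-separated layer_dims string
    # (e.g. the script default '10-5-1-0.2').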
model = Sequential()
act_fn = 'relu'
if len(layer_dims) == 0:
layer_dims = [10, 5, 0.2]
else:
layer_dims = [float(d) for d in layer_dims.split('-')]
model.add(
Dense(
int(num_feat * layer_dims[0]), input_dim=num_feat,
kernel_initializer='normal'))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
for layer_dim in layer_dims[1:-1]:
model.add(Dense(int(num_feat * layer_dim)))
model.add(Activation(act_fn))
model.add(BatchNormalization())
model.add(Dropout(drop_out))
model.add(Dense(int(num_feat * layer_dims[-1])))
model.add(Activation(act_fn))
model.add(Dropout(drop_out))
model.add(Dense(1))
adam = Adam(lr=lr)
model.compile(loss='logcosh', optimizer=adam)
return model
ENERGY_KEY = 'ENERGY'
INCHI_KEY = 'Inchi'
def generate_training_input(mol_file):
'''
:param mol_file: str
:return: pd.DataFrame
'''
ifs = oechem.oemolistream(mol_file)
training_data = []
for mol in ifs.GetOEGraphMols():
energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))
sf_elements = get_sf_elements(mol)
dihe_inchi = get_dihedral_inchi_key(mol)
data = [dihe_inchi, energy]
data.extend(sf_elements)
training_data.append(data)
ifs.close()
columns = [INCHI_KEY, ENERGY_KEY]
num_sf_elements = len(training_data[0]) - 2
sf_columns = ['sf_%d'%(i+1) for i in range(num_sf_elements)]
columns.extend(sf_columns)
df = pd.DataFrame(training_data, columns=columns)
# calculate relative energy for each profile
grouped = df.loc[:,[INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)
df2 = grouped.transform(lambda x: x - x.min())
df[ENERGY_KEY] = df2[ENERGY_KEY]
return df
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Train neural network model to predict torsional relative energy')
    parser.add_argument('--input', type=str, help='sd file containing MM structures along with '
                        'sd properties with torsion atom indices and QM energy')
    parser.add_argument('--num_epoch', default=5000, type=int, help='number of epochs (default: 5000)')
parser.add_argument('--batch_size', default=256, type=int, help='batch size (default: 256)')
parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str, help='layer dimensions')
    parser.add_argument('--lr', default=0.0001, type=float, help='learning rate (default: 1e-4)')
parser.add_argument('--dropout', default=0.2, type=float, help='dropout (default: 0.2)')
parser.add_argument('--val_split', default=0.1, type=float, help='validation split (default: 0.1)')
parser.add_argument('--scalar', default='scaler.pkl', type=str, help='output file with standard scaler')
parser.add_argument('--model', default='model.h5', type=str, help='output file with trained model')
parser.add_argument('-v', '--verbose', action='count', default=0)
args = parser.parse_args()
input_file = args.input
num_epoch = args.num_epoch
batch_size = args.batch_size
lr = args.lr
dropout = args.dropout
layer_dims = args.layer_dims
# generate training data using the molecules in the input file
# for each molecule in the input file, extract the QM energy from SD property "ENERGY"
# and generate symmetry function elements around the specified torsion (SD property "TORSION_ATOMS_FRAGMENT")
df = generate_training_input(input_file)
# cap the relative energy
tmp_idx = df.ENERGY > 30
df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])
dihe_inchis = df[INCHI_KEY].unique()
print('Number of profiles: %d' % len(dihe_inchis))
desc_bgn_idx = df.columns.get_loc('sf_1')
Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])
ytrain = df.ENERGY
# feature transformation
scaler = StandardScaler().fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
print('Xtrain.shape ', Xtrain.shape)
# save feature transformation
with open(args.scalar, 'wb') as fptr:
pickle.dump(scaler, fptr)
_, num_feat = Xtrain.shape
# early stopping criteria
earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=100, \
verbose=1, mode='auto')
model_file = args.model
# create DNN model
model = get_model(num_feat, lr, dropout, layer_dims)
print(model.summary())
checkpointer = ModelCheckpoint(
filepath=model_file, verbose=1, save_best_only=True)
callbacks_list = [checkpointer]
# train DNN model
model.fit(
Xtrain,
ytrain,
epochs=num_epoch,
batch_size=batch_size,
validation_split=args.val_split,
callbacks=callbacks_list,
verbose=1)
print('Training complete')
print('Standard scalar is saved in %s' % args.scalar)
print('Model is saved in %s' % args.model)
|
normal
|
{
"blob_id": "ed35a9bc3dd267c9a5fe76ccbb1b4ac5261fc3c8",
"index": 1993,
"step-1": "<mask token>\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\n<mask token>\n\n\ndef generate_training_input(mol_file):\n \"\"\"\n\n\n :param mol_file: str\n :return: pd.DataFrame\n \"\"\"\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n ifs.close()\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n df = pd.DataFrame(training_data, columns=columns)\n grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n return df\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help=\n 'sd file containing MM structures alongwith sd properties with torsion atom indices and QM energy'\n )\n parser.add_argument('--num_epoch', default=5000, type=int, help=\n 'number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help=\n 'batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,\n help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help=\n 'learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, type=float, help=\n 'dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help=\n 'validation split (default: 0.1)')\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help=\n 'output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help=\n 'output file with trained model')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n input_file = args.input\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n df = generate_training_input(input_file)\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n desc_bgn_idx = df.columns.get_loc('sf_1')\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = scaler.transform(Xtrain)\n print('Xtrain.shape ', 
Xtrain.shape)\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n _, num_feat = Xtrain.shape\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience\n =100, verbose=1, mode='auto')\n model_file = args.model\n model = get_model(num_feat, lr, dropout, layer_dims)\n print(model.summary())\n checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,\n save_best_only=True)\n callbacks_list = [checkpointer]\n model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,\n validation_split=args.val_split, callbacks=callbacks_list, verbose=1)\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n",
"step-3": "<mask token>\nseed = 7\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\nENERGY_KEY = 'ENERGY'\nINCHI_KEY = 'Inchi'\n\n\ndef generate_training_input(mol_file):\n \"\"\"\n\n\n :param mol_file: str\n :return: pd.DataFrame\n \"\"\"\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n ifs.close()\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n df = pd.DataFrame(training_data, columns=columns)\n grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n return df\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help=\n 'sd file containing MM structures alongwith sd properties with torsion atom indices and QM energy'\n )\n parser.add_argument('--num_epoch', default=5000, type=int, help=\n 'number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help=\n 'batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,\n help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help=\n 'learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, type=float, help=\n 'dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help=\n 'validation split (default: 0.1)')\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help=\n 'output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help=\n 'output file with trained model')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n input_file = args.input\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n df = generate_training_input(input_file)\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n desc_bgn_idx = df.columns.get_loc('sf_1')\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = 
scaler.transform(Xtrain)\n print('Xtrain.shape ', Xtrain.shape)\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n _, num_feat = Xtrain.shape\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience\n =100, verbose=1, mode='auto')\n model_file = args.model\n model = get_model(num_feat, lr, dropout, layer_dims)\n print(model.summary())\n checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,\n save_best_only=True)\n callbacks_list = [checkpointer]\n model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,\n validation_split=args.val_split, callbacks=callbacks_list, verbose=1)\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n",
"step-4": "import os, sys\nimport math\nimport argparse\nimport shutil\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import KFold\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LocallyConnected1D, Activation, GaussianNoise, GaussianDropout\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras.utils import multi_gpu_model\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\nfrom keras.callbacks import Callback\nimport timeit\nimport pickle\nfrom openeye import oechem\nfrom torsion.model import get_sf_elements\nfrom torsion.analysis import get_dihedral_inchi_key\nimport matplotlib.pyplot as plt\nseed = 7\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=0.001, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n model.add(Dense(int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n model.add(Dense(1))\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n return model\n\n\nENERGY_KEY = 'ENERGY'\nINCHI_KEY = 'Inchi'\n\n\ndef generate_training_input(mol_file):\n \"\"\"\n\n\n :param mol_file: str\n :return: pd.DataFrame\n \"\"\"\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n ifs.close()\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = [('sf_%d' % (i + 1)) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n df = pd.DataFrame(training_data, columns=columns)\n grouped = df.loc[:, [INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n return df\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help=\n 'sd file containing MM structures alongwith sd properties with torsion atom indices and QM energy'\n )\n parser.add_argument('--num_epoch', default=5000, type=int, help=\n 'number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help=\n 'batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str,\n help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help=\n 'learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, type=float, help=\n 'dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help=\n 
'validation split (default: 0.1)')\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help=\n 'output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help=\n 'output file with trained model')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n input_file = args.input\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n df = generate_training_input(input_file)\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n desc_bgn_idx = df.columns.get_loc('sf_1')\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = scaler.transform(Xtrain)\n print('Xtrain.shape ', Xtrain.shape)\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n _, num_feat = Xtrain.shape\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience\n =100, verbose=1, mode='auto')\n model_file = args.model\n model = get_model(num_feat, lr, dropout, layer_dims)\n print(model.summary())\n checkpointer = ModelCheckpoint(filepath=model_file, verbose=1,\n save_best_only=True)\n callbacks_list = [checkpointer]\n model.fit(Xtrain, ytrain, epochs=num_epoch, batch_size=batch_size,\n validation_split=args.val_split, callbacks=callbacks_list, verbose=1)\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n",
"step-5": "import os, sys\nimport math\nimport argparse\nimport shutil\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import KFold\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, LocallyConnected1D, Activation, \\\n GaussianNoise, GaussianDropout\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras.utils import multi_gpu_model\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.optimizers import Adam\nfrom keras.models import load_model\nfrom keras.callbacks import Callback\n\nimport timeit\nimport pickle\n\nfrom openeye import oechem\n\nfrom torsion.model import get_sf_elements\nfrom torsion.analysis import get_dihedral_inchi_key\n\nimport matplotlib.pyplot as plt\n\n# fix random seed for reproducibility\nseed = 7\nnp.random.seed(seed)\n\n\ndef get_model(num_feat=294, lr=1e-3, drop_out=0.1, layer_dims=''):\n model = Sequential()\n act_fn = 'relu'\n\n if len(layer_dims) == 0:\n layer_dims = [10, 5, 0.2]\n else:\n layer_dims = [float(d) for d in layer_dims.split('-')]\n\n model.add(\n Dense(\n int(num_feat * layer_dims[0]), input_dim=num_feat,\n kernel_initializer='normal'))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n\n for layer_dim in layer_dims[1:-1]:\n model.add(Dense(int(num_feat * layer_dim)))\n model.add(Activation(act_fn))\n model.add(BatchNormalization())\n model.add(Dropout(drop_out))\n\n model.add(Dense(int(num_feat * layer_dims[-1])))\n model.add(Activation(act_fn))\n model.add(Dropout(drop_out))\n\n model.add(Dense(1))\n\n adam = Adam(lr=lr)\n model.compile(loss='logcosh', optimizer=adam)\n\n return model\n\n\nENERGY_KEY = 'ENERGY'\nINCHI_KEY = 'Inchi'\n\ndef generate_training_input(mol_file):\n '''\n\n\n :param mol_file: str\n :return: pd.DataFrame\n '''\n ifs = oechem.oemolistream(mol_file)\n training_data = []\n for mol in ifs.GetOEGraphMols():\n energy = float(oechem.OEGetSDData(mol, ENERGY_KEY))\n sf_elements = get_sf_elements(mol)\n dihe_inchi = get_dihedral_inchi_key(mol)\n\n data = [dihe_inchi, energy]\n data.extend(sf_elements)\n training_data.append(data)\n\n ifs.close()\n\n columns = [INCHI_KEY, ENERGY_KEY]\n num_sf_elements = len(training_data[0]) - 2\n sf_columns = ['sf_%d'%(i+1) for i in range(num_sf_elements)]\n columns.extend(sf_columns)\n\n df = pd.DataFrame(training_data, columns=columns)\n\n # calculate relative energy for each profile\n grouped = df.loc[:,[INCHI_KEY, ENERGY_KEY]].groupby(INCHI_KEY)\n df2 = grouped.transform(lambda x: x - x.min())\n df[ENERGY_KEY] = df2[ENERGY_KEY]\n\n return df\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Train neural network model to predict torsional relative energy')\n parser.add_argument('--input', type=str, help='sd file containing MM structures alongwith '\n 'sd properties with torsion atom indices and QM energy')\n parser.add_argument('--num_epoch', default=5000, type=int, help='number of epoch (default = 2000)')\n parser.add_argument('--batch_size', default=256, type=int, help='batch size (default: 256)')\n parser.add_argument('--layer_dims', default='10-5-1-0.2', type=str, help='layer dimensions')\n parser.add_argument('--lr', default=0.0001, type=float, help='learning rate (default: 1e-r)')\n parser.add_argument('--dropout', default=0.2, 
type=float, help='dropout (default: 0.2)')\n parser.add_argument('--val_split', default=0.1, type=float, help='validation split (default: 0.1)')\n\n parser.add_argument('--scalar', default='scaler.pkl', type=str, help='output file with standard scaler')\n parser.add_argument('--model', default='model.h5', type=str, help='output file with trained model')\n\n parser.add_argument('-v', '--verbose', action='count', default=0)\n args = parser.parse_args()\n\n input_file = args.input\n\n num_epoch = args.num_epoch\n batch_size = args.batch_size\n lr = args.lr\n dropout = args.dropout\n layer_dims = args.layer_dims\n\n # generate training data using the molecules in the input file\n # for each molecule in the input file, extract the QM energy from SD property \"ENERGY\"\n # and generate symmetry function elements around the specified torsion (SD property \"TORSION_ATOMS_FRAGMENT\")\n df = generate_training_input(input_file)\n\n # cap the relative energy\n tmp_idx = df.ENERGY > 30\n df.ENERGY[tmp_idx] = 30.0 + np.exp(30 - df.ENERGY[tmp_idx])\n\n dihe_inchis = df[INCHI_KEY].unique()\n print('Number of profiles: %d' % len(dihe_inchis))\n\n desc_bgn_idx = df.columns.get_loc('sf_1')\n\n Xtrain = df.as_matrix(columns=df.columns[desc_bgn_idx:])\n ytrain = df.ENERGY\n\n # feature transformation\n scaler = StandardScaler().fit(Xtrain)\n Xtrain = scaler.transform(Xtrain)\n\n print('Xtrain.shape ', Xtrain.shape)\n\n # save feature transformation\n with open(args.scalar, 'wb') as fptr:\n pickle.dump(scaler, fptr)\n\n _, num_feat = Xtrain.shape\n\n # early stopping criteria\n earlystop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=100, \\\n verbose=1, mode='auto')\n\n model_file = args.model\n # create DNN model\n model = get_model(num_feat, lr, dropout, layer_dims)\n\n print(model.summary())\n\n checkpointer = ModelCheckpoint(\n filepath=model_file, verbose=1, save_best_only=True)\n callbacks_list = [checkpointer]\n\n # train DNN model\n model.fit(\n Xtrain,\n ytrain,\n epochs=num_epoch,\n batch_size=batch_size,\n validation_split=args.val_split,\n callbacks=callbacks_list,\n verbose=1)\n\n print('Training complete')\n print('Standard scalar is saved in %s' % args.scalar)\n print('Model is saved in %s' % args.model)\n\n\n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
"""A utility for outputting graphs as pickle files.
To test, run ``openbiolink generate --no-download --no-input --output-format pickle --qual hq``.
"""
import os
import pickle
from typing import Mapping
from openbiolink.edge import Edge
from openbiolink.graph_creation.graph_writer.base import GraphWriter
__all__ = [
"GraphPickleWriter",
]
class GraphPickleWriter(GraphWriter):
format_key = 'PICKLE'
def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, tn_namespaces):
"""Write the graph as pickles."""
with open(os.path.join(self.graph_dir_path, "tp_nodes.pkl"), "wb") as file:
pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tp_edges.pkl"), "wb") as file:
pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tp_namespaces.pkl"), "wb") as file:
pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_nodes.pkl"), "wb") as file:
pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_edges.pkl"), "wb") as file:
pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.graph_dir_path, "tn_namespaces.pkl"), "wb") as file:
pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)
|
normal
|
{
"blob_id": "58d069f6700149793c3446bdd4677f08eaf301ee",
"index": 670,
"step-1": "<mask token>\n\n\nclass GraphPickleWriter(GraphWriter):\n <mask token>\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-2": "<mask token>\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-3": "<mask token>\n__all__ = ['GraphPickleWriter']\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-4": "<mask token>\nimport os\nimport pickle\nfrom typing import Mapping\nfrom openbiolink.edge import Edge\nfrom openbiolink.graph_creation.graph_writer.base import GraphWriter\n__all__ = ['GraphPickleWriter']\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge],\n tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, 'tp_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tp_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_nodes.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_edges.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, 'tn_namespaces.pkl'), 'wb'\n ) as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-5": "\"\"\"A utility for outputting graphs as pickle files.\n\nTo test, run ``openbiolink generate --no-download --no-input --output-format pickle --qual hq``.\n\"\"\"\n\nimport os\nimport pickle\nfrom typing import Mapping\n\nfrom openbiolink.edge import Edge\nfrom openbiolink.graph_creation.graph_writer.base import GraphWriter\n\n__all__ = [\n \"GraphPickleWriter\",\n]\n\n\nclass GraphPickleWriter(GraphWriter):\n format_key = 'PICKLE'\n\n def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, tn_namespaces):\n \"\"\"Write the graph as pickles.\"\"\"\n with open(os.path.join(self.graph_dir_path, \"tp_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_edges.pkl\"), \"wb\") as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_edges.pkl\"), \"wb\") as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Generated by Django 2.1.7 on 2019-03-24 07:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('adminsite', '0005_auto_20190324_0706'),
]
operations = [
migrations.RenameField(
model_name='district',
old_name='District',
new_name='district',
),
]
|
normal
|
{
"blob_id": "6e56c7792d88385cc28c48a7d6dd32b9d6917c64",
"index": 2913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('adminsite', '0005_auto_20190324_0706')]\n operations = [migrations.RenameField(model_name='district', old_name=\n 'District', new_name='district')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('adminsite', '0005_auto_20190324_0706')]\n operations = [migrations.RenameField(model_name='district', old_name=\n 'District', new_name='district')]\n",
"step-5": "# Generated by Django 2.1.7 on 2019-03-24 07:08\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('adminsite', '0005_auto_20190324_0706'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='district',\n old_name='District',\n new_name='district',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime, time, threading, os
from . import queues
logLevels = ["none", "info", "debug"]
level = "none"
def write(message):
queues.logger_queue.put(message)
def runLogger():
while True:
# The log path should be read from config. Pass into logger?
log_path = "/home/pi/Desktop/Projects/rose/robot_code/logs/"
try:
os.makedirs(log_path)
except FileExistsError:
pass
file_name = log_path + "Logs-" + str(datetime.date.today())
if not queues.logger_queue.empty():
message = queues.logger_queue.get()
if message == "turn off":
break
else:
writeFile(file_name, message)
else:
continue
def writeFile(file_name = None, message = None, lvl = "none"):
global logLevels
global level
index = logLevels.index(level)
if lvl in logLevels:
lvlIndex = logLevels.index(lvl)
else:
lvlIndex = 0
if index >= lvlIndex:
if not file_name is None and not message is None:
with open(file_name + ".txt", "a") as fileObj:
fileObj.write(message)
fileObj.write("\n")
def writeEnd():
queues.logger_queue.put("turn off")
|
normal
|
{
"blob_id": "91188b55b0f5d8277812d82711f5bcde82819b30",
"index": 9563,
"step-1": "<mask token>\n\n\ndef runLogger():\n while True:\n log_path = '/home/pi/Desktop/Projects/rose/robot_code/logs/'\n try:\n os.makedirs(log_path)\n except FileExistsError:\n pass\n file_name = log_path + 'Logs-' + str(datetime.date.today())\n if not queues.logger_queue.empty():\n message = queues.logger_queue.get()\n if message == 'turn off':\n break\n else:\n writeFile(file_name, message)\n else:\n continue\n\n\ndef writeFile(file_name=None, message=None, lvl='none'):\n global logLevels\n global level\n index = logLevels.index(level)\n if lvl in logLevels:\n lvlIndex = logLevels.index(lvl)\n else:\n lvlIndex = 0\n if index >= lvlIndex:\n if not file_name is None and not message is None:\n with open(file_name + '.txt', 'a') as fileObj:\n fileObj.write(message)\n fileObj.write('\\n')\n\n\ndef writeEnd():\n queues.logger_queue.put('turn off')\n",
"step-2": "<mask token>\n\n\ndef write(message):\n queues.logger_queue.put(message)\n\n\ndef runLogger():\n while True:\n log_path = '/home/pi/Desktop/Projects/rose/robot_code/logs/'\n try:\n os.makedirs(log_path)\n except FileExistsError:\n pass\n file_name = log_path + 'Logs-' + str(datetime.date.today())\n if not queues.logger_queue.empty():\n message = queues.logger_queue.get()\n if message == 'turn off':\n break\n else:\n writeFile(file_name, message)\n else:\n continue\n\n\ndef writeFile(file_name=None, message=None, lvl='none'):\n global logLevels\n global level\n index = logLevels.index(level)\n if lvl in logLevels:\n lvlIndex = logLevels.index(lvl)\n else:\n lvlIndex = 0\n if index >= lvlIndex:\n if not file_name is None and not message is None:\n with open(file_name + '.txt', 'a') as fileObj:\n fileObj.write(message)\n fileObj.write('\\n')\n\n\ndef writeEnd():\n queues.logger_queue.put('turn off')\n",
"step-3": "<mask token>\nlogLevels = ['none', 'info', 'debug']\nlevel = 'none'\n\n\ndef write(message):\n queues.logger_queue.put(message)\n\n\ndef runLogger():\n while True:\n log_path = '/home/pi/Desktop/Projects/rose/robot_code/logs/'\n try:\n os.makedirs(log_path)\n except FileExistsError:\n pass\n file_name = log_path + 'Logs-' + str(datetime.date.today())\n if not queues.logger_queue.empty():\n message = queues.logger_queue.get()\n if message == 'turn off':\n break\n else:\n writeFile(file_name, message)\n else:\n continue\n\n\ndef writeFile(file_name=None, message=None, lvl='none'):\n global logLevels\n global level\n index = logLevels.index(level)\n if lvl in logLevels:\n lvlIndex = logLevels.index(lvl)\n else:\n lvlIndex = 0\n if index >= lvlIndex:\n if not file_name is None and not message is None:\n with open(file_name + '.txt', 'a') as fileObj:\n fileObj.write(message)\n fileObj.write('\\n')\n\n\ndef writeEnd():\n queues.logger_queue.put('turn off')\n",
"step-4": "import datetime, time, threading, os\nfrom . import queues\nlogLevels = ['none', 'info', 'debug']\nlevel = 'none'\n\n\ndef write(message):\n queues.logger_queue.put(message)\n\n\ndef runLogger():\n while True:\n log_path = '/home/pi/Desktop/Projects/rose/robot_code/logs/'\n try:\n os.makedirs(log_path)\n except FileExistsError:\n pass\n file_name = log_path + 'Logs-' + str(datetime.date.today())\n if not queues.logger_queue.empty():\n message = queues.logger_queue.get()\n if message == 'turn off':\n break\n else:\n writeFile(file_name, message)\n else:\n continue\n\n\ndef writeFile(file_name=None, message=None, lvl='none'):\n global logLevels\n global level\n index = logLevels.index(level)\n if lvl in logLevels:\n lvlIndex = logLevels.index(lvl)\n else:\n lvlIndex = 0\n if index >= lvlIndex:\n if not file_name is None and not message is None:\n with open(file_name + '.txt', 'a') as fileObj:\n fileObj.write(message)\n fileObj.write('\\n')\n\n\ndef writeEnd():\n queues.logger_queue.put('turn off')\n",
"step-5": "import datetime, time, threading, os\nfrom . import queues\n\nlogLevels = [\"none\", \"info\", \"debug\"]\nlevel = \"none\"\n\ndef write(message):\n queues.logger_queue.put(message)\n\ndef runLogger():\n while True:\n # The log path should be read from config. Pass into logger?\n log_path = \"/home/pi/Desktop/Projects/rose/robot_code/logs/\"\n try:\n os.makedirs(log_path)\n except FileExistsError:\n pass\n\n file_name = log_path + \"Logs-\" + str(datetime.date.today())\n if not queues.logger_queue.empty():\n message = queues.logger_queue.get()\n if message == \"turn off\":\n break\n else:\n writeFile(file_name, message)\n else:\n continue\n\ndef writeFile(file_name = None, message = None, lvl = \"none\"):\n global logLevels\n global level\n\n index = logLevels.index(level)\n\n if lvl in logLevels:\n lvlIndex = logLevels.index(lvl)\n else:\n lvlIndex = 0\n\n if index >= lvlIndex:\n if not file_name is None and not message is None:\n with open(file_name + \".txt\", \"a\") as fileObj:\n fileObj.write(message)\n fileObj.write(\"\\n\")\n\ndef writeEnd():\n queues.logger_queue.put(\"turn off\")\n \n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy as np
import tensorflow as tf
K_model = tf.keras.models.load_model('K_model.h5')
K_model.summary()
features, labels = [], []
# k_file = open('dataset_20200409.tab')
k_file = open('ts.tab')
for line in k_file.readlines():
line = line.rstrip()
contents = line.split("\t")
label = contents.pop()
labels.append([float(label)])
features.append([float(i) for i in contents])
pass
MAE = 0
for ins in range(len(labels)):
pred = K_model(np.array([features[ins]]).astype(np.float32))
MAE += abs(pred - labels[ins]) / len(labels)
pass
print(MAE)
|
normal
|
{
"blob_id": "1c2a862f995869e3241dd835edb69399141bfb64",
"index": 8926,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nK_model.summary()\n<mask token>\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split('\\t')\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass\n<mask token>\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass\nprint(MAE)\n",
"step-3": "<mask token>\nK_model = tf.keras.models.load_model('K_model.h5')\nK_model.summary()\nfeatures, labels = [], []\nk_file = open('ts.tab')\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split('\\t')\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass\nMAE = 0\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass\nprint(MAE)\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nK_model = tf.keras.models.load_model('K_model.h5')\nK_model.summary()\nfeatures, labels = [], []\nk_file = open('ts.tab')\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split('\\t')\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass\nMAE = 0\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass\nprint(MAE)\n",
"step-5": "import numpy as np \nimport tensorflow as tf\n\nK_model = tf.keras.models.load_model('K_model.h5')\nK_model.summary()\n\nfeatures, labels = [], []\n# k_file = open('dataset_20200409.tab')\nk_file = open('ts.tab')\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split(\"\\t\")\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass \n\nMAE = 0\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass \nprint(MAE)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
import requests
import Queue
import codecs
import os
import urllib
import base64
from threading import Thread
from Crypto.Cipher import AES
requests.packages.urllib3.disable_warnings()
def check(q):
while True:
try:
c = q.get()
user = c.split(':')[0]
passw = c.split(':')[1]
work = False
proxy = {
'http': '127.0.0.1:8888',
'https': '127.0.0.1:8888'
}
s = requests.session()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36',
'Accept-Encoding': 'gzip',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'X-Requested-With': 'XMLHttpRequest'
}
r = s.get(
'https://www.namecheap.com/Cart/ajax/DomainSelection.ashx?action=checkuser&username={0}'.format(user),
verify=False,
headers=headers,
proxies=proxy
)
if 'UserExist' in r.text:
print user, 'is registered!'
f = open("registered.txt", "a")
f.write('{0}\n'.format(c))
f.close()
else:
print user, 'does not work!'
except Exception, e:
print e
raw_input("Please Send Me The Error Message!")
q.task_done()
def main():
with codecs.open('tocheck.txt', 'r', encoding='utf-8') as f:
users = f.readlines()
with codecs.open('regthreads.txt', 'r', encoding='utf-8') as f:
threads = f.read()
queue = Queue.Queue()
for _ in range(int(threads)):
worker = Thread(target=check, args=(queue,))
worker.start()
for user in users:
queue.put(user.strip().encode('ascii', 'ignore'))
if __name__ == '__main__':
try:
key = os.environ['COMPUTERNAME']
f = open("data.txt", "r")
data = f.read()
f.close()
while len(key) < 32:
key += 'A'
IV = 16 * '\x00'
mode = AES.MODE_CBC
encryptor = AES.new(key, mode, IV=IV)
l = base64.b16encode(encryptor.encrypt(data))
r = requests.get(
'http://divcentral.xyz/login.php?l={0}&serial={1}'.format(urllib.quote_plus(l), data)
)
if encryptor.decrypt(base64.b16decode(urllib.unquote(r.text))):
main()
else:
print 'Could not log in!'
except Exception, e:
print 'Error! PM Me with the message!'
print e
raw_input()
|
normal
|
{
"blob_id": "b8ab6b8c111876d6a781c82438f79307a849c47a",
"index": 1353,
"step-1": "# -*- coding: utf-8 -*-\nimport requests\nimport Queue\nimport codecs\nimport os\nimport urllib\nimport base64\nfrom threading import Thread\nfrom Crypto.Cipher import AES\n\nrequests.packages.urllib3.disable_warnings()\n\n\ndef check(q):\n while True:\n try:\n c = q.get()\n user = c.split(':')[0]\n passw = c.split(':')[1]\n work = False\n proxy = {\n\t\t\t\t'http': '127.0.0.1:8888',\n\t\t\t\t'https': '127.0.0.1:8888'\n }\n s = requests.session()\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36',\n 'Accept-Encoding': 'gzip',\n 'Accept': 'application/json, text/javascript, */*; q=0.01',\n 'X-Requested-With': 'XMLHttpRequest'\n }\n r = s.get(\n 'https://www.namecheap.com/Cart/ajax/DomainSelection.ashx?action=checkuser&username={0}'.format(user),\n verify=False,\n headers=headers,\n proxies=proxy\n )\n if 'UserExist' in r.text:\n print user, 'is registered!'\n f = open(\"registered.txt\", \"a\")\n f.write('{0}\\n'.format(c))\n f.close()\n else:\n print user, 'does not work!'\n except Exception, e:\n print e\n raw_input(\"Please Send Me The Error Message!\")\n q.task_done()\n \ndef main():\n with codecs.open('tocheck.txt', 'r', encoding='utf-8') as f:\n users = f.readlines()\n with codecs.open('regthreads.txt', 'r', encoding='utf-8') as f:\n threads = f.read()\n \n queue = Queue.Queue()\n for _ in range(int(threads)):\n worker = Thread(target=check, args=(queue,))\n worker.start()\n for user in users:\n queue.put(user.strip().encode('ascii', 'ignore'))\n \nif __name__ == '__main__':\n try:\n key = os.environ['COMPUTERNAME']\n f = open(\"data.txt\", \"r\")\n data = f.read()\n f.close()\n while len(key) < 32:\n key += 'A'\n IV = 16 * '\\x00'\n mode = AES.MODE_CBC\n encryptor = AES.new(key, mode, IV=IV)\n l = base64.b16encode(encryptor.encrypt(data))\n r = requests.get(\n 'http://divcentral.xyz/login.php?l={0}&serial={1}'.format(urllib.quote_plus(l), data)\n )\n if encryptor.decrypt(base64.b16decode(urllib.unquote(r.text))):\n main()\n else:\n print 'Could not log in!'\n except Exception, e:\n print 'Error! PM Me with the message!'\n print e\n raw_input()\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#! /usr/bin/python
#
# convert the swig -debug-lsymbols output text file format into
# a simple list of lua module names and classes
#
# Dan Wilcox <[email protected]> 2017
#
import sys
import re
if len(sys.argv) < 2:
print("USAGE: lua_syntax.py MODULENAME INFILE")
exit(0)
module = sys.argv[1]
infile = sys.argv[2]
sections = []
sectionMatches = [
"string", # swig std::string wrappers
"string.SwigStatic" # swig std::string wrappers
]
sectionEnds = [
"Vector" # swig std::vector wrappers
]
lineMatches = [
"string", # swig std::string wrappers
"lua:cdata", # c pointers
]
lineStarts = [
"~", # destructors
"__", # lua metatable __add, __sub, etc
"of", # of core type prefixes
"ofx" # ofx addon type prefixes
]
lineEnds = [
"Vector" # swig std::vector wrappers
]
# any other user-supplied section ignores
for arg in sys.argv[3:]:
sectionIgnores.append(arg)
# check if a string matches one in an array
def matches(needle, haystack):
for straw in haystack:
if needle == straw:
return True
return False
# append a section to the sections array if the name passes muster
def appendSection(section):
# drop static classes which don't have any symbols
if len(section) < 2:
return
# section names are followed by a " -", so double check
if not section[0].endswith("-"):
print("warning: section name does not end with -: "+section[0])
return
# grab first non-whitespace name ie. "Color" from "Color -"
match = re.match("\S+", section[0])
if match:
if section[0] == "-": # main module is just a "-"
section[0] = module
else: # class name
section[0] = match.group(0)
else:
print("warning: section name had no non-whitespace match: "+section[0])
return
# drop sections which match certain strings
if matches(section[0], sectionMatches):
return
# drop sections which contain certain strings
if any(section[0].endswith(x) for x in sectionEnds):
return
# if got this far, the section must be good...
sections.append(section)
# parse swig output into sections
file = open(infile)
section = []
for line in file:
# strip whitespace
line = line.strip()
# ignore beginning and end lines
if line.startswith("LANGUAGE"):
continue
# section headers are a series of = chars, ie. ==========
if line.startswith("="):
appendSection(section)
section = []
# append line within a section
else:
# empty line
if len(line) == 0:
continue
# drop lines with certain prefixes
if any(line.startswith(x) for x in lineStarts):
continue
# drop lines with certain suffixes
if any(line.endswith(x) for x in lineEnds):
continue
# drop lines which match certain strings
if matches(line, lineMatches):
continue
# line must be good
section.append(line)
appendSection(section) # catch any left overs
file.close()
section = []
# for section in sections:
# print(section)
# exit(0)
# output module & section names to each section line
file = open(module+"_syntax.txt", "w")
num = 0
for section in sections:
# grab name from first line and output
prefix = " "
name = section[0]
if name == module: # main module
prefix = module+"."
file.write(module+"\n")
elif name.endswith(".SwigStatic"): # static members
name = name.split(".")[0] # drop SwigStatic suffix
prefix = module+"."+name+"."
else: # class instance members
file.write(module+"."+name+"\n")
# sort remaining lines
lines = section[1:]
lines.sort()
# output with module.class prefix
for line in lines:
if not line.endswith(".SwigStatic"): # drop statics from main module
file.write(prefix+line+"\n")
num = num + 1
# linebreak between sections
if num < len(sections):
file.write("\n")
file.close()
|
normal
|
{
"blob_id": "c712875273f988a3aa6dab61f79e99a077823060",
"index": 807,
"step-1": "<mask token>\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\n<mask token>\n",
"step-2": "<mask token>\nif len(sys.argv) < 2:\n print('USAGE: lua_syntax.py MODULENAME INFILE')\n exit(0)\n<mask token>\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\n<mask token>\nfor line in file:\n line = line.strip()\n if line.startswith('LANGUAGE'):\n continue\n if line.startswith('='):\n appendSection(section)\n section = []\n else:\n if len(line) == 0:\n continue\n if any(line.startswith(x) for x in lineStarts):\n continue\n if any(line.endswith(x) for x in lineEnds):\n continue\n if matches(line, lineMatches):\n continue\n section.append(line)\nappendSection(section)\nfile.close()\n<mask token>\nfor section in sections:\n prefix = ' '\n name = section[0]\n if name == module:\n prefix = module + '.'\n file.write(module + '\\n')\n elif name.endswith('.SwigStatic'):\n name = name.split('.')[0]\n prefix = module + '.' + name + '.'\n else:\n file.write(module + '.' + name + '\\n')\n lines = section[1:]\n lines.sort()\n for line in lines:\n if not line.endswith('.SwigStatic'):\n file.write(prefix + line + '\\n')\n num = num + 1\n if num < len(sections):\n file.write('\\n')\nfile.close()\n",
"step-3": "<mask token>\nif len(sys.argv) < 2:\n print('USAGE: lua_syntax.py MODULENAME INFILE')\n exit(0)\nmodule = sys.argv[1]\ninfile = sys.argv[2]\nsections = []\nsectionMatches = ['string', 'string.SwigStatic']\nsectionEnds = ['Vector']\nlineMatches = ['string', 'lua:cdata']\nlineStarts = ['~', '__', 'of', 'ofx']\nlineEnds = ['Vector']\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\nfile = open(infile)\nsection = []\nfor line in file:\n line = line.strip()\n if line.startswith('LANGUAGE'):\n continue\n if line.startswith('='):\n appendSection(section)\n section = []\n else:\n if len(line) == 0:\n continue\n if any(line.startswith(x) for x in lineStarts):\n continue\n if any(line.endswith(x) for x in lineEnds):\n continue\n if matches(line, lineMatches):\n continue\n section.append(line)\nappendSection(section)\nfile.close()\nsection = []\nfile = open(module + '_syntax.txt', 'w')\nnum = 0\nfor section in sections:\n prefix = ' '\n name = section[0]\n if name == module:\n prefix = module + '.'\n file.write(module + '\\n')\n elif name.endswith('.SwigStatic'):\n name = name.split('.')[0]\n prefix = module + '.' + name + '.'\n else:\n file.write(module + '.' + name + '\\n')\n lines = section[1:]\n lines.sort()\n for line in lines:\n if not line.endswith('.SwigStatic'):\n file.write(prefix + line + '\\n')\n num = num + 1\n if num < len(sections):\n file.write('\\n')\nfile.close()\n",
"step-4": "import sys\nimport re\nif len(sys.argv) < 2:\n print('USAGE: lua_syntax.py MODULENAME INFILE')\n exit(0)\nmodule = sys.argv[1]\ninfile = sys.argv[2]\nsections = []\nsectionMatches = ['string', 'string.SwigStatic']\nsectionEnds = ['Vector']\nlineMatches = ['string', 'lua:cdata']\nlineStarts = ['~', '__', 'of', 'ofx']\nlineEnds = ['Vector']\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n\ndef appendSection(section):\n if len(section) < 2:\n return\n if not section[0].endswith('-'):\n print('warning: section name does not end with -: ' + section[0])\n return\n match = re.match('\\\\S+', section[0])\n if match:\n if section[0] == '-':\n section[0] = module\n else:\n section[0] = match.group(0)\n else:\n print('warning: section name had no non-whitespace match: ' +\n section[0])\n return\n if matches(section[0], sectionMatches):\n return\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n sections.append(section)\n\n\nfile = open(infile)\nsection = []\nfor line in file:\n line = line.strip()\n if line.startswith('LANGUAGE'):\n continue\n if line.startswith('='):\n appendSection(section)\n section = []\n else:\n if len(line) == 0:\n continue\n if any(line.startswith(x) for x in lineStarts):\n continue\n if any(line.endswith(x) for x in lineEnds):\n continue\n if matches(line, lineMatches):\n continue\n section.append(line)\nappendSection(section)\nfile.close()\nsection = []\nfile = open(module + '_syntax.txt', 'w')\nnum = 0\nfor section in sections:\n prefix = ' '\n name = section[0]\n if name == module:\n prefix = module + '.'\n file.write(module + '\\n')\n elif name.endswith('.SwigStatic'):\n name = name.split('.')[0]\n prefix = module + '.' + name + '.'\n else:\n file.write(module + '.' + name + '\\n')\n lines = section[1:]\n lines.sort()\n for line in lines:\n if not line.endswith('.SwigStatic'):\n file.write(prefix + line + '\\n')\n num = num + 1\n if num < len(sections):\n file.write('\\n')\nfile.close()\n",
"step-5": "#! /usr/bin/python\n#\n# convert the swig -debug-lsymbols output text file format into\n# a simple list of lua module names and classes\n#\n# Dan Wilcox <[email protected]> 2017\n#\nimport sys\nimport re\n\nif len(sys.argv) < 2:\n print(\"USAGE: lua_syntax.py MODULENAME INFILE\")\n exit(0)\n\nmodule = sys.argv[1]\ninfile = sys.argv[2]\nsections = []\nsectionMatches = [\n \"string\", # swig std::string wrappers\n \"string.SwigStatic\" # swig std::string wrappers\n]\nsectionEnds = [\n \"Vector\" # swig std::vector wrappers\n]\nlineMatches = [ \n \"string\", # swig std::string wrappers\n \"lua:cdata\", # c pointers\n]\nlineStarts = [\n \"~\", # destructors\n \"__\", # lua metatable __add, __sub, etc\n \"of\", # of core type prefixes\n \"ofx\" # ofx addon type prefixes\n]\nlineEnds = [\n \"Vector\" # swig std::vector wrappers\n]\n\n# any other user-supplied section ignores\nfor arg in sys.argv[3:]:\n sectionIgnores.append(arg)\n\n# check if a string matches one in an array\ndef matches(needle, haystack):\n for straw in haystack:\n if needle == straw:\n return True\n return False\n\n# append a section to the sections array if the name passes muster\ndef appendSection(section):\n # drop static classes which don't have any symbols\n if len(section) < 2:\n return\n # section names are followed by a \" -\", so double check\n if not section[0].endswith(\"-\"):\n print(\"warning: section name does not end with -: \"+section[0])\n return\n # grab first non-whitespace name ie. \"Color\" from \"Color -\"\n match = re.match(\"\\S+\", section[0])\n if match:\n if section[0] == \"-\": # main module is just a \"-\"\n section[0] = module\n else: # class name\n section[0] = match.group(0)\n else:\n print(\"warning: section name had no non-whitespace match: \"+section[0])\n return\n # drop sections which match certain strings\n if matches(section[0], sectionMatches):\n return\n # drop sections which contain certain strings\n if any(section[0].endswith(x) for x in sectionEnds):\n return\n # if got this far, the section must be good...\n sections.append(section)\n\n# parse swig output into sections\nfile = open(infile)\nsection = []\nfor line in file:\n # strip whitespace\n line = line.strip()\n # ignore beginning and end lines\n if line.startswith(\"LANGUAGE\"):\n continue\n # section headers are a series of = chars, ie. 
==========\n if line.startswith(\"=\"):\n appendSection(section)\n section = []\n # append line within a section\n else:\n # empty line\n if len(line) == 0:\n continue\n # drop lines with certain prefixes\n if any(line.startswith(x) for x in lineStarts):\n continue\n # drop lines with certain suffixes\n if any(line.endswith(x) for x in lineEnds):\n continue\n # drop lines which match certain strings\n if matches(line, lineMatches):\n continue\n # line must be good\n section.append(line)\nappendSection(section) # catch any left overs\nfile.close()\nsection = []\n\n# for section in sections:\n# print(section)\n# exit(0)\n\n# output module & section names to each section line\nfile = open(module+\"_syntax.txt\", \"w\")\nnum = 0\nfor section in sections:\n\n # grab name from first line and output\n prefix = \" \"\n name = section[0]\n if name == module: # main module\n prefix = module+\".\"\n file.write(module+\"\\n\")\n elif name.endswith(\".SwigStatic\"): # static members\n name = name.split(\".\")[0] # drop SwigStatic suffix\n prefix = module+\".\"+name+\".\"\n else: # class instance members\n file.write(module+\".\"+name+\"\\n\")\n\n # sort remaining lines\n lines = section[1:]\n lines.sort()\n\n # output with module.class prefix\n for line in lines:\n if not line.endswith(\".SwigStatic\"): # drop statics from main module\n file.write(prefix+line+\"\\n\")\n num = num + 1\n\n # linebreak between sections\n if num < len(sections):\n file.write(\"\\n\")\nfile.close()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class RosEnvImg(RosEnvAbs):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_observation_(self):
"""
Function returns state that will be fed to the rl-agent
It includes
the laserscan and the waypoint information stored in an image.
:return: state
"""
obs = np.zeros(self.STATE_SIZE, dtype=np.float)
obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.
STATE_SIZE[0:2])
if self.debug_:
self.debugger_.show_input_occ_grid(self.input_img_)
self.debugger_.show_input_image(obs[:, :, 0])
return obs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RosEnvImg(RosEnvAbs):
<|reserved_special_token_0|>
def __init__(self, ns, state_collector, execution_mode, task_mode,
state_size, observation_space, stack_offset, action_size,
action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):
state_collector.set_state_mode(0)
super(RosEnvImg, self).__init__(ns, state_collector, execution_mode,
task_mode, state_size, observation_space, stack_offset,
action_size, action_space, debug, goal_radius, wp_radius,
robot_radius, reward_fnc)
def get_observation_(self):
"""
Function returns state that will be fed to the rl-agent
It includes
the laserscan and the waypoint information stored in an image.
:return: state
"""
obs = np.zeros(self.STATE_SIZE, dtype=np.float)
obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.
STATE_SIZE[0:2])
if self.debug_:
self.debugger_.show_input_occ_grid(self.input_img_)
self.debugger_.show_input_image(obs[:, :, 0])
return obs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RosEnvImg(RosEnvAbs):
"""
This (abstract) class is a simulation environment wrapper for
the X-Image Representation.
"""
def __init__(self, ns, state_collector, execution_mode, task_mode,
state_size, observation_space, stack_offset, action_size,
action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):
state_collector.set_state_mode(0)
super(RosEnvImg, self).__init__(ns, state_collector, execution_mode,
task_mode, state_size, observation_space, stack_offset,
action_size, action_space, debug, goal_radius, wp_radius,
robot_radius, reward_fnc)
def get_observation_(self):
"""
Function returns state that will be fed to the rl-agent
It includes
the laserscan and the waypoint information stored in an image.
:return: state
"""
obs = np.zeros(self.STATE_SIZE, dtype=np.float)
obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.
STATE_SIZE[0:2])
if self.debug_:
self.debugger_.show_input_occ_grid(self.input_img_)
self.debugger_.show_input_image(obs[:, :, 0])
return obs
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
from rl_agent.env_wrapper.ros_env import RosEnvAbs
import rospy
class RosEnvImg(RosEnvAbs):
"""
This (abstract) class is a simulation environment wrapper for
the X-Image Representation.
"""
def __init__(self, ns, state_collector, execution_mode, task_mode,
state_size, observation_space, stack_offset, action_size,
action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):
state_collector.set_state_mode(0)
super(RosEnvImg, self).__init__(ns, state_collector, execution_mode,
task_mode, state_size, observation_space, stack_offset,
action_size, action_space, debug, goal_radius, wp_radius,
robot_radius, reward_fnc)
def get_observation_(self):
"""
Function returns state that will be fed to the rl-agent
It includes
the laserscan and the waypoint information stored in an image.
:return: state
"""
obs = np.zeros(self.STATE_SIZE, dtype=np.float)
obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.
STATE_SIZE[0:2])
if self.debug_:
self.debugger_.show_input_occ_grid(self.input_img_)
self.debugger_.show_input_image(obs[:, :, 0])
return obs
<|reserved_special_token_1|>
'''
@name: ros_env_img.py
@brief: This (abstract) class is a simulation environment wrapper for
the X-Image Representation.
@author: Ronja Gueldenring
@version: 3.5
@date: 2019/04/05
'''
# python relevant
import numpy as np
# custom classes
from rl_agent.env_wrapper.ros_env import RosEnvAbs
# ros-relevant
import rospy
class RosEnvImg(RosEnvAbs):
'''
This (abstract) class is a simulation environment wrapper for
the X-Image Representation.
'''
def __init__(self, ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):
state_collector.set_state_mode(0)
super(RosEnvImg, self).__init__(ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc)
def get_observation_(self):
"""
Function returns state that will be fed to the rl-agent
It includes
the laserscan and the waypoint information stored in an image.
:return: state
"""
obs = np.zeros(self.STATE_SIZE, dtype=np.float)
obs[:,:,0] = np.array(self.input_img_.data).reshape((self.STATE_SIZE[0:2]))
if self.debug_:
self.debugger_.show_input_occ_grid(self.input_img_)
self.debugger_.show_input_image(obs[:,:,0])
return obs
|
flexible
|
{
"blob_id": "1a979933eb02e9d12dc034021448cbade59abc48",
"index": 2585,
"step-1": "<mask token>\n\n\nclass RosEnvImg(RosEnvAbs):\n <mask token>\n <mask token>\n\n def get_observation_(self):\n \"\"\"\n Function returns state that will be fed to the rl-agent\n It includes\n the laserscan and the waypoint information stored in an image.\n :return: state\n \"\"\"\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.\n STATE_SIZE[0:2])\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:, :, 0])\n return obs\n",
"step-2": "<mask token>\n\n\nclass RosEnvImg(RosEnvAbs):\n <mask token>\n\n def __init__(self, ns, state_collector, execution_mode, task_mode,\n state_size, observation_space, stack_offset, action_size,\n action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):\n state_collector.set_state_mode(0)\n super(RosEnvImg, self).__init__(ns, state_collector, execution_mode,\n task_mode, state_size, observation_space, stack_offset,\n action_size, action_space, debug, goal_radius, wp_radius,\n robot_radius, reward_fnc)\n\n def get_observation_(self):\n \"\"\"\n Function returns state that will be fed to the rl-agent\n It includes\n the laserscan and the waypoint information stored in an image.\n :return: state\n \"\"\"\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.\n STATE_SIZE[0:2])\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:, :, 0])\n return obs\n",
"step-3": "<mask token>\n\n\nclass RosEnvImg(RosEnvAbs):\n \"\"\"\n This (abstract) class is a simulation environment wrapper for\n the X-Image Representation.\n \"\"\"\n\n def __init__(self, ns, state_collector, execution_mode, task_mode,\n state_size, observation_space, stack_offset, action_size,\n action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):\n state_collector.set_state_mode(0)\n super(RosEnvImg, self).__init__(ns, state_collector, execution_mode,\n task_mode, state_size, observation_space, stack_offset,\n action_size, action_space, debug, goal_radius, wp_radius,\n robot_radius, reward_fnc)\n\n def get_observation_(self):\n \"\"\"\n Function returns state that will be fed to the rl-agent\n It includes\n the laserscan and the waypoint information stored in an image.\n :return: state\n \"\"\"\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.\n STATE_SIZE[0:2])\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:, :, 0])\n return obs\n",
"step-4": "<mask token>\nimport numpy as np\nfrom rl_agent.env_wrapper.ros_env import RosEnvAbs\nimport rospy\n\n\nclass RosEnvImg(RosEnvAbs):\n \"\"\"\n This (abstract) class is a simulation environment wrapper for\n the X-Image Representation.\n \"\"\"\n\n def __init__(self, ns, state_collector, execution_mode, task_mode,\n state_size, observation_space, stack_offset, action_size,\n action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):\n state_collector.set_state_mode(0)\n super(RosEnvImg, self).__init__(ns, state_collector, execution_mode,\n task_mode, state_size, observation_space, stack_offset,\n action_size, action_space, debug, goal_radius, wp_radius,\n robot_radius, reward_fnc)\n\n def get_observation_(self):\n \"\"\"\n Function returns state that will be fed to the rl-agent\n It includes\n the laserscan and the waypoint information stored in an image.\n :return: state\n \"\"\"\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:, :, 0] = np.array(self.input_img_.data).reshape(self.\n STATE_SIZE[0:2])\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:, :, 0])\n return obs\n",
"step-5": "'''\n @name: ros_env_img.py\n @brief: This (abstract) class is a simulation environment wrapper for\n the X-Image Representation.\n @author: Ronja Gueldenring\n @version: 3.5\n @date: 2019/04/05\n'''\n\n\n# python relevant\nimport numpy as np\n\n# custom classes\nfrom rl_agent.env_wrapper.ros_env import RosEnvAbs\n\n# ros-relevant\nimport rospy\n\nclass RosEnvImg(RosEnvAbs):\n '''\n This (abstract) class is a simulation environment wrapper for\n the X-Image Representation.\n '''\n def __init__(self, ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc):\n state_collector.set_state_mode(0)\n super(RosEnvImg, self).__init__(ns, state_collector, execution_mode, task_mode, state_size, observation_space, stack_offset, action_size, action_space, debug, goal_radius, wp_radius, robot_radius, reward_fnc)\n\n\n def get_observation_(self):\n \"\"\"\n Function returns state that will be fed to the rl-agent\n It includes\n the laserscan and the waypoint information stored in an image.\n :return: state\n \"\"\"\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:,:,0] = np.array(self.input_img_.data).reshape((self.STATE_SIZE[0:2]))\n\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:,:,0])\n return obs\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('contract_abi.json') as f:
info_json = json.load(f)
<|reserved_special_token_0|>
print(abi)
print(myfilter)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('contract_abi.json') as f:
info_json = json.load(f)
abi = info_json
mycontract = w3.eth.contract(address=
'0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)
myfilter = mycontract.events.currentResponderState.createFilter(fromBlock=
16147303)
print(abi)
print(myfilter)
<|reserved_special_token_1|>
from web3.auto.infura import w3
import json
import os
with open('contract_abi.json') as f:
info_json = json.load(f)
abi = info_json
mycontract = w3.eth.contract(address=
'0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)
myfilter = mycontract.events.currentResponderState.createFilter(fromBlock=
16147303)
print(abi)
print(myfilter)
<|reserved_special_token_1|>
from web3.auto.infura import w3
import json
import os
with open("contract_abi.json") as f:
info_json = json.load(f)
abi = info_json
mycontract = w3.eth.contract(address='0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)
myfilter = mycontract.events.currentResponderState.createFilter(fromBlock=16147303)
#myfilter.fromBlock = "16181508"
#mycontract.eventFilter('currentResponderState', {'fromBlock': 16181508,'toBlock': 'latest'})
print(abi)
print (myfilter)
|
flexible
|
{
"blob_id": "8921c0a17e90f7113d1e0be630a15fc9d74d1780",
"index": 8519,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\n<mask token>\nprint(abi)\nprint(myfilter)\n",
"step-3": "<mask token>\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address=\n '0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=\n 16147303)\nprint(abi)\nprint(myfilter)\n",
"step-4": "from web3.auto.infura import w3\nimport json\nimport os\nwith open('contract_abi.json') as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address=\n '0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=\n 16147303)\nprint(abi)\nprint(myfilter)\n",
"step-5": "from web3.auto.infura import w3\nimport json\nimport os\n\nwith open(\"contract_abi.json\") as f:\n info_json = json.load(f)\nabi = info_json\nmycontract = w3.eth.contract(address='0x091FDeb7990D3E00d13c31b81841d56b33164AD7', abi=abi)\nmyfilter = mycontract.events.currentResponderState.createFilter(fromBlock=16147303)\n#myfilter.fromBlock = \"16181508\"\n#mycontract.eventFilter('currentResponderState', {'fromBlock': 16181508,'toBlock': 'latest'})\nprint(abi)\nprint (myfilter)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import asyncio
from . import edit_or_reply, udy
plugin_category = "utils"
@udy.cod_cmd(
pattern="as$",
command=("as", plugin_category),
info={
"header": "salam.",
"usage": "{tr}as",
},
)
async def _(event):
"animation command"
event = await edit_or_reply(event, "as")
await event.edit("yuuhuuuu")
await asyncio.sleep(2)
await event.edit("Assalamualaikum wr. wb.")
@udy.cod_cmd(
pattern="ws$",
command=("ws", plugin_category),
info={
"header": "answer the salam.",
"usage": "{tr}ws",
},
)
async def _(event):
"animation command"
event = await edit_or_reply(event, "ws")
await event.edit("huuyyyy")
await asyncio.sleep(2)
await event.edit("Waalaikum salam wr. wb.")
|
normal
|
{
"blob_id": "a78bbb85f4912e5f7ea23f689de65cb16a38d814",
"index": 9787,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_cmd(pattern='as$', command=('as', plugin_category), info={'header':\n 'salam.', 'usage': '{tr}as'})\nasync def _(event):\n \"\"\"animation command\"\"\"\n event = await edit_or_reply(event, 'as')\n await event.edit('yuuhuuuu')\n await asyncio.sleep(2)\n await event.edit('Assalamualaikum wr. wb.')\n\n\[email protected]_cmd(pattern='ws$', command=('ws', plugin_category), info={'header':\n 'answer the salam.', 'usage': '{tr}ws'})\nasync def _(event):\n \"\"\"animation command\"\"\"\n event = await edit_or_reply(event, 'ws')\n await event.edit('huuyyyy')\n await asyncio.sleep(2)\n await event.edit('Waalaikum salam wr. wb.')\n",
"step-3": "<mask token>\nplugin_category = 'utils'\n\n\[email protected]_cmd(pattern='as$', command=('as', plugin_category), info={'header':\n 'salam.', 'usage': '{tr}as'})\nasync def _(event):\n \"\"\"animation command\"\"\"\n event = await edit_or_reply(event, 'as')\n await event.edit('yuuhuuuu')\n await asyncio.sleep(2)\n await event.edit('Assalamualaikum wr. wb.')\n\n\[email protected]_cmd(pattern='ws$', command=('ws', plugin_category), info={'header':\n 'answer the salam.', 'usage': '{tr}ws'})\nasync def _(event):\n \"\"\"animation command\"\"\"\n event = await edit_or_reply(event, 'ws')\n await event.edit('huuyyyy')\n await asyncio.sleep(2)\n await event.edit('Waalaikum salam wr. wb.')\n",
"step-4": "import asyncio\nfrom . import edit_or_reply, udy\nplugin_category = 'utils'\n\n\[email protected]_cmd(pattern='as$', command=('as', plugin_category), info={'header':\n 'salam.', 'usage': '{tr}as'})\nasync def _(event):\n \"\"\"animation command\"\"\"\n event = await edit_or_reply(event, 'as')\n await event.edit('yuuhuuuu')\n await asyncio.sleep(2)\n await event.edit('Assalamualaikum wr. wb.')\n\n\[email protected]_cmd(pattern='ws$', command=('ws', plugin_category), info={'header':\n 'answer the salam.', 'usage': '{tr}ws'})\nasync def _(event):\n \"\"\"animation command\"\"\"\n event = await edit_or_reply(event, 'ws')\n await event.edit('huuyyyy')\n await asyncio.sleep(2)\n await event.edit('Waalaikum salam wr. wb.')\n",
"step-5": "import asyncio\n\nfrom . import edit_or_reply, udy\n\nplugin_category = \"utils\"\n\n\[email protected]_cmd(\n pattern=\"as$\",\n command=(\"as\", plugin_category),\n info={\n \"header\": \"salam.\",\n \"usage\": \"{tr}as\",\n },\n)\nasync def _(event):\n \"animation command\"\n event = await edit_or_reply(event, \"as\")\n await event.edit(\"yuuhuuuu\")\n await asyncio.sleep(2)\n await event.edit(\"Assalamualaikum wr. wb.\")\n\n\[email protected]_cmd(\n pattern=\"ws$\",\n command=(\"ws\", plugin_category),\n info={\n \"header\": \"answer the salam.\",\n \"usage\": \"{tr}ws\",\n },\n)\nasync def _(event):\n \"animation command\"\n event = await edit_or_reply(event, \"ws\")\n await event.edit(\"huuyyyy\")\n await asyncio.sleep(2)\n await event.edit(\"Waalaikum salam wr. wb.\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def f(n):
if n % 2 == 0:
sum = 0
for x in range(2, n + 1, 2):
sum += 1 / x
print(sum)
if n % 2 != 0:
sum = 0
for x in range(1, n + 1, 2):
sum += 1 / x
print(sum)
<|reserved_special_token_1|>
'''
3、 编写一个函数,输入n为偶数时,调用函数求1/2+1/4+...+1/n,当输入n为奇数时,调用函数1/1+1/3+...+1/n
'''
def f(n):
if n%2==0:
sum=0
for x in range(2,n+1,2):
sum+=1/x
print(sum)
if n%2!=0:
sum=0
for x in range(1,n+1,2):
sum+=1/x
print(sum)
|
flexible
|
{
"blob_id": "69cf28d32e6543271a0855d61a76808b03c06891",
"index": 4805,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(n):\n if n % 2 == 0:\n sum = 0\n for x in range(2, n + 1, 2):\n sum += 1 / x\n print(sum)\n if n % 2 != 0:\n sum = 0\n for x in range(1, n + 1, 2):\n sum += 1 / x\n print(sum)\n",
"step-3": "'''\n3、\t编写一个函数,输入n为偶数时,调用函数求1/2+1/4+...+1/n,当输入n为奇数时,调用函数1/1+1/3+...+1/n\n'''\n\ndef f(n):\n if n%2==0:\n sum=0\n for x in range(2,n+1,2):\n sum+=1/x\n print(sum)\n if n%2!=0:\n sum=0\n for x in range(1,n+1,2):\n sum+=1/x\n print(sum)\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(s + 2 - s % 2)
<|reserved_special_token_1|>
s = int(input())
print(s + 2 - s % 2)
<|reserved_special_token_1|>
s=int(input())
print(s+2-(s%2))
|
flexible
|
{
"blob_id": "0412369f89842e2f55aa115e63f46a1b71a0f322",
"index": 2685,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(s + 2 - s % 2)\n",
"step-3": "s = int(input())\nprint(s + 2 - s % 2)\n",
"step-4": "s=int(input())\nprint(s+2-(s%2))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
if __name__ == '__main__':
import sys
import os.path
srcpath = sys.argv[1] if len(sys.argv) >= 1 else './'
verfn = sys.argv[2] if len(sys.argv) >= 2 else None
try :
with open(os.path.join(srcpath,'.svn/entries'),'r') as fp:
x = fp.read().splitlines()[3]
if verfn :
with open(verfn,'w') as fp :
fp.write(x)
else :
sys.stdout.write(x)
except IOError, e :
import traceback
traceback.print_exc()
pass
|
normal
|
{
"blob_id": "1ebf92cf40053e561b04a666eb1dd36f54999e2c",
"index": 7324,
"step-1": "\r\n\r\n\r\nif __name__ == '__main__':\r\n \r\n import sys\r\n import os.path\r\n \r\n srcpath = sys.argv[1] if len(sys.argv) >= 1 else './'\r\n verfn = sys.argv[2] if len(sys.argv) >= 2 else None\r\n \r\n try :\r\n \r\n with open(os.path.join(srcpath,'.svn/entries'),'r') as fp:\r\n x = fp.read().splitlines()[3]\r\n \r\n if verfn :\r\n with open(verfn,'w') as fp :\r\n fp.write(x)\r\n else :\r\n sys.stdout.write(x)\r\n \r\n except IOError, e :\r\n \r\n import traceback\r\n traceback.print_exc()\r\n \r\n pass\r\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-08-26 21:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('exchange', '0004_auto_20170826_2120'),
]
operations = [
migrations.AlterModelOptions(
name='type',
options={'verbose_name': '\u0442\u0438\u043f \u0437\u0430\u0434\u0430\u043d\u0438\u044f', 'verbose_name_plural': '\u0422\u0438\u043f\u044b \u0437\u0430\u0434\u0430\u043d\u0438\u044f'},
),
migrations.AlterField(
model_name='task',
name='count',
field=models.IntegerField(default=0, verbose_name='\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0432\u044b\u043f\u043e\u043b\u043d\u0435\u043d\u043d\u044b\u0445 \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u0439'),
),
migrations.AlterField(
model_name='task',
name='max_count',
field=models.IntegerField(default=1, verbose_name='\u041a\u043e\u043b\u0438\u0447\u0435\u0441\u0442\u0432\u043e \u0437\u0430\u043f\u043b\u0430\u043d\u0438\u0440\u043e\u0432\u0430\u043d\u043d\u044b\u0445 \u0434\u0435\u0439\u0441\u0442\u0432\u0438\u0439'),
),
migrations.AlterField(
model_name='task',
name='status',
field=models.CharField(choices=[('NEW', '\u041d\u043e\u0432\u0430\u044f'), ('CNF', '\u041f\u043e\u0434\u0442\u0432\u0435\u0440\u0436\u0434\u0435\u043d\u0430'), ('Y', '\u0410\u043a\u0442\u0438\u0432\u043d\u0430'), ('BLC', '\u0417\u0430\u0431\u043b\u043e\u043a\u0438\u0440\u043e\u0432\u0430\u043d\u0430 \u043c\u043e\u0434\u0435\u0440\u0430\u0442\u043e\u0440\u043e\u043c'), ('DEL', '\u0423\u0434\u0430\u043b\u0435\u043d\u043e'), ('DON', '\u0417\u0430\u0432\u0435\u0440\u0448\u0435\u043d\u043e')], default='NEW', max_length=3),
),
]
|
normal
|
{
"blob_id": "264896da4d92797b9f31e28c19a2e315efff815a",
"index": 138,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('exchange', '0004_auto_20170826_2120')]\n operations = [migrations.AlterModelOptions(name='type', options={\n 'verbose_name': 'тип задания', 'verbose_name_plural':\n 'Типы задания'}), migrations.AlterField(model_name='task', name=\n 'count', field=models.IntegerField(default=0, verbose_name=\n 'Количество выполненных действий')), migrations.AlterField(\n model_name='task', name='max_count', field=models.IntegerField(\n default=1, verbose_name='Количество запланированных действий')),\n migrations.AlterField(model_name='task', name='status', field=\n models.CharField(choices=[('NEW', 'Новая'), ('CNF', 'Подтверждена'),\n ('Y', 'Активна'), ('BLC', 'Заблокирована модератором'), ('DEL',\n 'Удалено'), ('DON', 'Завершено')], default='NEW', max_length=3))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('exchange', '0004_auto_20170826_2120')]\n operations = [migrations.AlterModelOptions(name='type', options={\n 'verbose_name': 'тип задания', 'verbose_name_plural':\n 'Типы задания'}), migrations.AlterField(model_name='task', name=\n 'count', field=models.IntegerField(default=0, verbose_name=\n 'Количество выполненных действий')), migrations.AlterField(\n model_name='task', name='max_count', field=models.IntegerField(\n default=1, verbose_name='Количество запланированных действий')),\n migrations.AlterField(model_name='task', name='status', field=\n models.CharField(choices=[('NEW', 'Новая'), ('CNF', 'Подтверждена'),\n ('Y', 'Активна'), ('BLC', 'Заблокирована модератором'), ('DEL',\n 'Удалено'), ('DON', 'Завершено')], default='NEW', max_length=3))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.10.5 on 2017-08-26 21:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('exchange', '0004_auto_20170826_2120'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='type',\n options={'verbose_name': '\\u0442\\u0438\\u043f \\u0437\\u0430\\u0434\\u0430\\u043d\\u0438\\u044f', 'verbose_name_plural': '\\u0422\\u0438\\u043f\\u044b \\u0437\\u0430\\u0434\\u0430\\u043d\\u0438\\u044f'},\n ),\n migrations.AlterField(\n model_name='task',\n name='count',\n field=models.IntegerField(default=0, verbose_name='\\u041a\\u043e\\u043b\\u0438\\u0447\\u0435\\u0441\\u0442\\u0432\\u043e \\u0432\\u044b\\u043f\\u043e\\u043b\\u043d\\u0435\\u043d\\u043d\\u044b\\u0445 \\u0434\\u0435\\u0439\\u0441\\u0442\\u0432\\u0438\\u0439'),\n ),\n migrations.AlterField(\n model_name='task',\n name='max_count',\n field=models.IntegerField(default=1, verbose_name='\\u041a\\u043e\\u043b\\u0438\\u0447\\u0435\\u0441\\u0442\\u0432\\u043e \\u0437\\u0430\\u043f\\u043b\\u0430\\u043d\\u0438\\u0440\\u043e\\u0432\\u0430\\u043d\\u043d\\u044b\\u0445 \\u0434\\u0435\\u0439\\u0441\\u0442\\u0432\\u0438\\u0439'),\n ),\n migrations.AlterField(\n model_name='task',\n name='status',\n field=models.CharField(choices=[('NEW', '\\u041d\\u043e\\u0432\\u0430\\u044f'), ('CNF', '\\u041f\\u043e\\u0434\\u0442\\u0432\\u0435\\u0440\\u0436\\u0434\\u0435\\u043d\\u0430'), ('Y', '\\u0410\\u043a\\u0442\\u0438\\u0432\\u043d\\u0430'), ('BLC', '\\u0417\\u0430\\u0431\\u043b\\u043e\\u043a\\u0438\\u0440\\u043e\\u0432\\u0430\\u043d\\u0430 \\u043c\\u043e\\u0434\\u0435\\u0440\\u0430\\u0442\\u043e\\u0440\\u043e\\u043c'), ('DEL', '\\u0423\\u0434\\u0430\\u043b\\u0435\\u043d\\u043e'), ('DON', '\\u0417\\u0430\\u0432\\u0435\\u0440\\u0448\\u0435\\u043d\\u043e')], default='NEW', max_length=3),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from trips.models import Post
admin.site.register(Post)
|
normal
|
{
"blob_id": "a8197a4f0bb84e734696bf43fa976c76732d75b8",
"index": 9863,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Post)\n",
"step-3": "from django.contrib import admin\nfrom trips.models import Post\nadmin.site.register(Post)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'UTF-8').encode(), addr))
<|reserved_special_token_0|>
server.set_debuglevel(1)
server.login()
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'UTF-8').encode(), addr))
from_addr = '[email protected]'
to_addr = '[email protected]'
smtp_server = 'smtp.163.com'
passwd = input('Password: ')
msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
msg['From'] = _format_addr('Python 爱好者<%s>' % from_addr)
msg['To'] = _format_addr('开发者<%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的邮件...', 'utf-8').encode()
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login()
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
<|reserved_special_token_1|>
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'UTF-8').encode(), addr))
from_addr = '[email protected]'
to_addr = '[email protected]'
smtp_server = 'smtp.163.com'
passwd = input('Password: ')
msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
msg['From'] = _format_addr('Python 爱好者<%s>' % from_addr)
msg['To'] = _format_addr('开发者<%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的邮件...', 'utf-8').encode()
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login()
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
|
flexible
|
{
"blob_id": "4dd71d01e499f3d0ee49d3bf5204fb3bbb03ede5",
"index": 2976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'UTF-8').encode(), addr))\n\n\n<mask token>\nserver.set_debuglevel(1)\nserver.login()\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n",
"step-3": "<mask token>\n\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'UTF-8').encode(), addr))\n\n\nfrom_addr = '[email protected]'\nto_addr = '[email protected]'\nsmtp_server = 'smtp.163.com'\npasswd = input('Password: ')\nmsg = MIMEText('hello, send by Python...', 'plain', 'utf-8')\nmsg['From'] = _format_addr('Python 爱好者<%s>' % from_addr)\nmsg['To'] = _format_addr('开发者<%s>' % to_addr)\nmsg['Subject'] = Header('来自SMTP的邮件...', 'utf-8').encode()\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login()\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n",
"step-4": "from email import encoders\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.utils import parseaddr, formataddr\nimport smtplib\n\n\ndef _format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'UTF-8').encode(), addr))\n\n\nfrom_addr = '[email protected]'\nto_addr = '[email protected]'\nsmtp_server = 'smtp.163.com'\npasswd = input('Password: ')\nmsg = MIMEText('hello, send by Python...', 'plain', 'utf-8')\nmsg['From'] = _format_addr('Python 爱好者<%s>' % from_addr)\nmsg['To'] = _format_addr('开发者<%s>' % to_addr)\nmsg['Subject'] = Header('来自SMTP的邮件...', 'utf-8').encode()\nserver = smtplib.SMTP(smtp_server, 25)\nserver.set_debuglevel(1)\nserver.login()\nserver.sendmail(from_addr, [to_addr], msg.as_string())\nserver.quit()\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
from simple_avk.AVK import SimpleAVK
from simple_avk.exceptions import MethodError, LongpollError
|
normal
|
{
"blob_id": "2bccfba2448059a41185b117b224813e344b50f8",
"index": 5673,
"step-1": "<mask token>\n",
"step-2": "from simple_avk.AVK import SimpleAVK\nfrom simple_avk.exceptions import MethodError, LongpollError\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class FssFuzzHash:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FssFuzzHash:
@staticmethod
def insert_node(agent_id, data):
return FssFuzzHashDao.insert_node(agent_id, data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('./dao')
<|reserved_special_token_0|>
class FssFuzzHash:
@staticmethod
def insert_node(agent_id, data):
return FssFuzzHashDao.insert_node(agent_id, data)
<|reserved_special_token_1|>
import sys
sys.path.append('./dao')
from fss_data_fuzzhash_dao import *
class FssFuzzHash:
@staticmethod
def insert_node(agent_id, data):
return FssFuzzHashDao.insert_node(agent_id, data)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# project: fshell
# author: s0nnet
# time: 2017-01-08
# desc: data_fuzzhash
import sys
sys.path.append("./dao")
from fss_data_fuzzhash_dao import *
class FssFuzzHash:
@staticmethod
def insert_node(agent_id, data):
return FssFuzzHashDao.insert_node(agent_id, data)
|
flexible
|
{
"blob_id": "398f9f52b83ffddfb452abbeaad2e83610580fee",
"index": 9763,
"step-1": "<mask token>\n\n\nclass FssFuzzHash:\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FssFuzzHash:\n\n @staticmethod\n def insert_node(agent_id, data):\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-3": "<mask token>\nsys.path.append('./dao')\n<mask token>\n\n\nclass FssFuzzHash:\n\n @staticmethod\n def insert_node(agent_id, data):\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-4": "import sys\nsys.path.append('./dao')\nfrom fss_data_fuzzhash_dao import *\n\n\nclass FssFuzzHash:\n\n @staticmethod\n def insert_node(agent_id, data):\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# project: fshell\n# author: s0nnet\n# time: 2017-01-08\n# desc: data_fuzzhash\n\n\nimport sys\nsys.path.append(\"./dao\")\nfrom fss_data_fuzzhash_dao import *\n\n\nclass FssFuzzHash:\n \n @staticmethod\n def insert_node(agent_id, data):\n\n return FssFuzzHashDao.insert_node(agent_id, data)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
sentence = "Practice Problems to Drill List Comprehension in Your Head."
sentence = sentence.split()
sentence = [i.replace(".", "") for i in sentence]
[print(i) for i in sentence if len(i)<5]
|
normal
|
{
"blob_id": "c0e349be45cd964e8e398baaed64eae792189dd1",
"index": 5723,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n[print(i) for i in sentence if len(i) < 5]\n",
"step-3": "sentence = 'Practice Problems to Drill List Comprehension in Your Head.'\nsentence = sentence.split()\nsentence = [i.replace('.', '') for i in sentence]\n[print(i) for i in sentence if len(i) < 5]\n",
"step-4": "sentence = \"Practice Problems to Drill List Comprehension in Your Head.\"\nsentence = sentence.split()\nsentence = [i.replace(\".\", \"\") for i in sentence]\n[print(i) for i in sentence if len(i)<5]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ClassLevel',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('level', models.CharField(unique=True, max_length=100)),
],
),
migrations.CreateModel(
name='CourseRecord',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('Course_Title', models.CharField(unique=True, max_length=50)),
('Course_Code', models.CharField(unique=True, max_length=10)),
('Course_Unit', models.PositiveSmallIntegerField()),
('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),
('level', models.ForeignKey(to='Qbank.ClassLevel')),
],
),
migrations.CreateModel(
name='QuestionBank',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('CourseTitle', models.CharField(max_length=50)),
('CourseCode', models.CharField(max_length=10)),
('CourseUnit', models.IntegerField()),
('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),
('Date', models.DateField()),
('question_papers', models.FileField(upload_to='QuestionPapers')),
('level', models.ForeignKey(to='Qbank.ClassLevel')),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('Account_Type', models.CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length=1, default='S')),
('Upload_Picture', models.ImageField(upload_to='profile_images', blank=True)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
|
normal
|
{
"blob_id": "ab5400f4b44a53cb5cc2f6394bcdb8f55fd218f0",
"index": 1813,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='ClassLevel', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('level', models.CharField(unique=True,\n max_length=100))]), migrations.CreateModel(name='CourseRecord',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Course_Title', models.\n CharField(unique=True, max_length=50)), ('Course_Code', models.\n CharField(unique=True, max_length=10)), ('Course_Unit', models.\n PositiveSmallIntegerField()), ('Semester', models.CharField(choices\n =[('First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'level', models.ForeignKey(to='Qbank.ClassLevel'))]), migrations.\n CreateModel(name='QuestionBank', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('CourseTitle', models.CharField(max_length=50)), (\n 'CourseCode', models.CharField(max_length=10)), ('CourseUnit',\n models.IntegerField()), ('Semester', models.CharField(choices=[(\n 'First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'Date', models.DateField()), ('question_papers', models.FileField(\n upload_to='QuestionPapers')), ('level', models.ForeignKey(to=\n 'Qbank.ClassLevel'))]), migrations.CreateModel(name='UserProfile',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Account_Type', models.\n CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length\n =1, default='S')), ('Upload_Picture', models.ImageField(upload_to=\n 'profile_images', blank=True)), ('user', models.OneToOneField(to=\n settings.AUTH_USER_MODEL))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='ClassLevel', fields=[('id',\n models.AutoField(verbose_name='ID', serialize=False, auto_created=\n True, primary_key=True)), ('level', models.CharField(unique=True,\n max_length=100))]), migrations.CreateModel(name='CourseRecord',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Course_Title', models.\n CharField(unique=True, max_length=50)), ('Course_Code', models.\n CharField(unique=True, max_length=10)), ('Course_Unit', models.\n PositiveSmallIntegerField()), ('Semester', models.CharField(choices\n =[('First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'level', models.ForeignKey(to='Qbank.ClassLevel'))]), migrations.\n CreateModel(name='QuestionBank', fields=[('id', models.AutoField(\n verbose_name='ID', serialize=False, auto_created=True, primary_key=\n True)), ('CourseTitle', models.CharField(max_length=50)), (\n 'CourseCode', models.CharField(max_length=10)), ('CourseUnit',\n models.IntegerField()), ('Semester', models.CharField(choices=[(\n 'First_Semester', 'First_Semester'), ('Second_Semester',\n 'Second_Semester')], max_length=20, default='Select_Semester')), (\n 'Date', models.DateField()), ('question_papers', models.FileField(\n upload_to='QuestionPapers')), ('level', models.ForeignKey(to=\n 'Qbank.ClassLevel'))]), migrations.CreateModel(name='UserProfile',\n fields=[('id', models.AutoField(verbose_name='ID', serialize=False,\n auto_created=True, primary_key=True)), ('Account_Type', models.\n CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length\n =1, default='S')), ('Upload_Picture', models.ImageField(upload_to=\n 'profile_images', blank=True)), ('user', models.OneToOneField(to=\n settings.AUTH_USER_MODEL))])]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ClassLevel',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('level', models.CharField(unique=True, max_length=100)),\n ],\n ),\n migrations.CreateModel(\n name='CourseRecord',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Course_Title', models.CharField(unique=True, max_length=50)),\n ('Course_Code', models.CharField(unique=True, max_length=10)),\n ('Course_Unit', models.PositiveSmallIntegerField()),\n ('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),\n ('level', models.ForeignKey(to='Qbank.ClassLevel')),\n ],\n ),\n migrations.CreateModel(\n name='QuestionBank',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('CourseTitle', models.CharField(max_length=50)),\n ('CourseCode', models.CharField(max_length=10)),\n ('CourseUnit', models.IntegerField()),\n ('Semester', models.CharField(choices=[('First_Semester', 'First_Semester'), ('Second_Semester', 'Second_Semester')], max_length=20, default='Select_Semester')),\n ('Date', models.DateField()),\n ('question_papers', models.FileField(upload_to='QuestionPapers')),\n ('level', models.ForeignKey(to='Qbank.ClassLevel')),\n ],\n ),\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('Account_Type', models.CharField(choices=[('L', 'Lecturer'), ('S', 'Student')], max_length=1, default='S')),\n ('Upload_Picture', models.ImageField(upload_to='profile_images', blank=True)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Solution:
def jump(self, nums: List[int]) ->int:
l = len(nums)
jump = 0
curEnd = 0
curFarthest = 0
for i in range(l - 1):
curFarthest = max(curFarthest, i + nums[i])
if i == curEnd:
jump += 1
curEnd = curFarthest
return jump
|
normal
|
{
"blob_id": "763d448bc447b88d5f2de777a475a1dd50906527",
"index": 7491,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def jump(self, nums: List[int]) ->int:\n l = len(nums)\n jump = 0\n curEnd = 0\n curFarthest = 0\n for i in range(l - 1):\n curFarthest = max(curFarthest, i + nums[i])\n if i == curEnd:\n jump += 1\n curEnd = curFarthest\n return jump\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def config_from_env(config):
username = os.getenv('OSF_USERNAME')
if username is not None:
config['username'] = username
project = os.getenv('OSF_PROJECT')
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
if args.project is None:
sys.exit(
'You have to specify a project ID via the command line, configuration file or environment variable.'
)
password = None
if username is not None:
password = os.getenv('OSF_PASSWORD')
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit('Please set a username (run `osf -h` for details).')
else:
sys.exit('You are not authorized to access this project.')
return return_value
return wrapper
<|reserved_special_token_0|>
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, 'wb') as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit('Local file %s already exists, not overwriting.' % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print('Local file %s already matches remote.' % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def config_from_env(config):
username = os.getenv('OSF_USERNAME')
if username is not None:
config['username'] = username
project = os.getenv('OSF_PROJECT')
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
if args.project is None:
sys.exit(
'You have to specify a project ID via the command line, configuration file or environment variable.'
)
password = None
if username is not None:
password = os.getenv('OSF_PASSWORD')
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit('Please set a username (run `osf -h` for details).')
else:
sys.exit('You are not authorized to access this project.')
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
print('Provide a username for the config file [current username: {}]:'.
format(config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.
format(config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open('.osfcli.config', 'w')
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, 'wb') as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit('Local file %s already exists, not overwriting.' % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print('Local file %s already matches remote.' % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit(
'To upload a file you need to provide a username and password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError(
'Expected source ({}) to be a directory when using recursive mode.'
.format(args.source))
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force, update=
args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force, update=
args.update)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def config_from_env(config):
username = os.getenv('OSF_USERNAME')
if username is not None:
config['username'] = username
project = os.getenv('OSF_PROJECT')
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
if args.project is None:
sys.exit(
'You have to specify a project ID via the command line, configuration file or environment variable.'
)
password = None
if username is not None:
password = os.getenv('OSF_PASSWORD')
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit('Please set a username (run `osf -h` for details).')
else:
sys.exit('You are not authorized to access this project.')
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
print('Provide a username for the config file [current username: {}]:'.
format(config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.
format(config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open('.osfcli.config', 'w')
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, 'wb') as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit('Local file %s already exists, not overwriting.' % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print('Local file %s already matches remote.' % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit(
'To upload a file you need to provide a username and password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError(
'Expected source ({}) to be a directory when using recursive mode.'
.format(args.source))
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force, update=
args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force, update=
args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit(
'To remove a file you need to provide a username and password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.target)
store = project.storage(storage)
for f in store.files:
if norm_remote_path(f.path) == remote_path:
f.remove()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists('.osfcli.config'):
config_ = configparser.ConfigParser()
config_.read('.osfcli.config')
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv('OSF_USERNAME')
if username is not None:
config['username'] = username
project = os.getenv('OSF_PROJECT')
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
if args.project is None:
sys.exit(
'You have to specify a project ID via the command line, configuration file or environment variable.'
)
password = None
if username is not None:
password = os.getenv('OSF_PASSWORD')
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit('Please set a username (run `osf -h` for details).')
else:
sys.exit('You are not authorized to access this project.')
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
print('Provide a username for the config file [current username: {}]:'.
format(config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.
format(config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open('.osfcli.config', 'w')
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, 'wb') as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit('Local file %s already exists, not overwriting.' % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print('Local file %s already matches remote.' % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit(
'To upload a file you need to provide a username and password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError(
'Expected source ({}) to be a directory when using recursive mode.'
.format(args.source))
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force, update=
args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force, update=
args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit(
'To remove a file you need to provide a username and password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.target)
store = project.storage(storage)
for f in store.files:
if norm_remote_path(f.path) == remote_path:
f.remove()
<|reserved_special_token_1|>
"""Command line interface to the OSF
These functions implement the functionality of the command-line interface.
"""
from __future__ import print_function
from functools import wraps
import getpass
import os
import sys
from six.moves import configparser
from six.moves import input
from tqdm import tqdm
from .api import OSF
from .exceptions import UnauthorizedException
from .utils import norm_remote_path, split_storage, makedirs, checksum
def config_from_file():
if os.path.exists(".osfcli.config"):
config_ = configparser.ConfigParser()
config_.read(".osfcli.config")
# for python2 compatibility
config = dict(config_.items('osf'))
else:
config = {}
return config
def config_from_env(config):
username = os.getenv("OSF_USERNAME")
if username is not None:
config['username'] = username
project = os.getenv("OSF_PROJECT")
if project is not None:
config['project'] = project
return config
def _get_username(args, config):
if args.username is None:
username = config.get('username')
else:
username = args.username
return username
def _setup_osf(args):
# Command line options have precedence over environment variables,
# which have precedence over the config file.
config = config_from_env(config_from_file())
username = _get_username(args, config)
project = config.get('project')
if args.project is None:
args.project = project
# still None? We are in trouble
if args.project is None:
sys.exit('You have to specify a project ID via the command line,'
' configuration file or environment variable.')
password = None
if username is not None:
password = os.getenv("OSF_PASSWORD")
# Prompt user when password is not set
if password is None:
password = getpass.getpass('Please input your password: ')
return OSF(username=username, password=password)
def might_need_auth(f):
"""Decorate a CLI function that might require authentication.
Catches any UnauthorizedException raised, prints a helpful message and
then exits.
"""
@wraps(f)
def wrapper(cli_args):
try:
return_value = f(cli_args)
except UnauthorizedException as e:
config = config_from_env(config_from_file())
username = _get_username(cli_args, config)
if username is None:
sys.exit("Please set a username (run `osf -h` for details).")
else:
sys.exit("You are not authorized to access this project.")
return return_value
return wrapper
def init(args):
"""Initialize or edit an existing .osfcli.config file."""
# reading existing config file, convert to configparser object
config = config_from_file()
config_ = configparser.ConfigParser()
config_.add_section('osf')
if 'username' not in config.keys():
config_.set('osf', 'username', '')
else:
config_.set('osf', 'username', config['username'])
if 'project' not in config.keys():
config_.set('osf', 'project', '')
else:
config_.set('osf', 'project', config['project'])
# now we can start asking for new values
print('Provide a username for the config file [current username: {}]:'.format(
config_.get('osf', 'username')))
username = input()
if username:
config_.set('osf', 'username', username)
print('Provide a project for the config file [current project: {}]:'.format(
config_.get('osf', 'project')))
project = input()
if project:
config_.set('osf', 'project', project)
cfgfile = open(".osfcli.config", "w")
config_.write(cfgfile)
cfgfile.close()
@might_need_auth
def clone(args):
"""Copy all files from all storages of a project.
The output directory defaults to the current directory.
If the project is private you need to specify a username.
If args.update is True, overwrite any existing local files only if local and
remote files differ.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
output_dir = args.project
if args.output is not None:
output_dir = args.output
with tqdm(unit='files') as pbar:
for store in project.storages:
prefix = os.path.join(output_dir, store.name)
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
path = os.path.join(prefix, path)
if os.path.exists(path) and args.update:
if checksum(path) == file_.hashes.get('md5'):
continue
directory, _ = os.path.split(path)
makedirs(directory, exist_ok=True)
with open(path, "wb") as f:
file_.write_to(f)
pbar.update()
@might_need_auth
def fetch(args):
"""Fetch an individual file from a project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
The local path defaults to the name of the remote file.
If the project is private you need to specify a username.
If args.force is True, write local file even if that file already exists.
If args.force is False but args.update is True, overwrite an existing local
file only if local and remote files differ.
"""
storage, remote_path = split_storage(args.remote)
local_path = args.local
if local_path is None:
_, local_path = os.path.split(remote_path)
local_path_exists = os.path.exists(local_path)
if local_path_exists and not args.force and not args.update:
sys.exit("Local file %s already exists, not overwriting." % local_path)
directory, _ = os.path.split(local_path)
if directory:
makedirs(directory, exist_ok=True)
osf = _setup_osf(args)
project = osf.project(args.project)
store = project.storage(storage)
for file_ in store.files:
if norm_remote_path(file_.path) == remote_path:
if local_path_exists and not args.force and args.update:
if file_.hashes.get('md5') == checksum(local_path):
print("Local file %s already matches remote." % local_path)
break
with open(local_path, 'wb') as fp:
file_.write_to(fp)
# only fetching one file so we are done
break
@might_need_auth
def list_(args):
"""List all files from all storages for project.
If the project is private you need to specify a username.
"""
osf = _setup_osf(args)
project = osf.project(args.project)
for store in project.storages:
prefix = store.name
for file_ in store.files:
path = file_.path
if path.startswith('/'):
path = path[1:]
print(os.path.join(prefix, path))
@might_need_auth
def upload(args):
"""Upload a new file to an existing project.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
If the project is private you need to specify a username.
To upload a whole directory (and all its sub-directories) use the `-r`
command-line option. If your source directory name ends in a / then
files will be created directly in the remote directory. If it does not
end in a slash an extra sub-directory with the name of the local directory
will be created.
To place contents of local directory `foo` in remote directory `bar/foo`:
$ osf upload -r foo bar
To place contents of local directory `foo` in remote directory `bar`:
$ osf upload -r foo/ bar
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To upload a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.destination)
if remote_path == '':
remote_path = os.path.split(args.source)[-1]
store = project.storage(storage)
if args.recursive:
if not os.path.isdir(args.source):
raise RuntimeError("Expected source ({}) to be a directory when "
"using recursive mode.".format(args.source))
# local name of the directory that is being uploaded
_, dir_name = os.path.split(args.source)
for root, _, files in os.walk(args.source):
subdir_path = os.path.relpath(root, args.source)
for fname in files:
local_path = os.path.join(root, fname)
with open(local_path, 'rb') as fp:
# build the remote path + fname
name = os.path.join(remote_path, dir_name, subdir_path,
fname)
store.create_file(name, fp, force=args.force,
update=args.update)
else:
with open(args.source, 'rb') as fp:
store.create_file(remote_path, fp, force=args.force,
update=args.update)
@might_need_auth
def remove(args):
"""Remove a file from the project's storage.
The first part of the remote path is interpreted as the name of the
storage provider. If there is no match the default (osfstorage) is
used.
"""
osf = _setup_osf(args)
if osf.username is None or osf.password is None:
sys.exit('To remove a file you need to provide a username and'
' password.')
project = osf.project(args.project)
storage, remote_path = split_storage(args.target)
store = project.storage(storage)
for f in store.files:
if norm_remote_path(f.path) == remote_path:
f.remove()
|
flexible
|
{
"blob_id": "ca551d8e55ebb15a03077af5695782c6d72ff2fd",
"index": 8091,
"step-1": "<mask token>\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run `osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\n<mask token>\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' 
% local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run `osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n print('Provide a username for the config file [current username: {}]:'.\n format(config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n print('Provide a project for the config file [current project: {}]:'.\n format(config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n cfgfile = open('.osfcli.config', 'w')\n config_.write(cfgfile)\n cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a 
project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' % local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. 
If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To upload a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\n 'Expected source ({}) to be a directory when using recursive mode.'\n .format(args.source))\n _, dir_name = os.path.split(args.source)\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force, update=\n args.update)\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force, update=\n args.update)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run `osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n print('Provide a username for the config file [current username: {}]:'.\n format(config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n print('Provide a project for the config file [current project: {}]:'.\n format(config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n cfgfile = open('.osfcli.config', 'w')\n config_.write(cfgfile)\n cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a 
project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' % local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. 
If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To upload a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\n 'Expected source ({}) to be a directory when using recursive mode.'\n .format(args.source))\n _, dir_name = os.path.split(args.source)\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force, update=\n args.update)\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force, update=\n args.update)\n\n\n@might_need_auth\ndef remove(args):\n \"\"\"Remove a file from the project's storage.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To remove a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.target)\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nfrom functools import wraps\nimport getpass\nimport os\nimport sys\nfrom six.moves import configparser\nfrom six.moves import input\nfrom tqdm import tqdm\nfrom .api import OSF\nfrom .exceptions import UnauthorizedException\nfrom .utils import norm_remote_path, split_storage, makedirs, checksum\n\n\ndef config_from_file():\n if os.path.exists('.osfcli.config'):\n config_ = configparser.ConfigParser()\n config_.read('.osfcli.config')\n config = dict(config_.items('osf'))\n else:\n config = {}\n return config\n\n\ndef config_from_env(config):\n username = os.getenv('OSF_USERNAME')\n if username is not None:\n config['username'] = username\n project = os.getenv('OSF_PROJECT')\n if project is not None:\n config['project'] = project\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n config = config_from_env(config_from_file())\n username = _get_username(args, config)\n project = config.get('project')\n if args.project is None:\n args.project = project\n if args.project is None:\n sys.exit(\n 'You have to specify a project ID via the command line, configuration file or environment variable.'\n )\n password = None\n if username is not None:\n password = os.getenv('OSF_PASSWORD')\n if password is None:\n password = getpass.getpass('Please input your password: ')\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n if username is None:\n sys.exit('Please set a username (run `osf -h` for details).')\n else:\n sys.exit('You are not authorized to access this project.')\n return return_value\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n print('Provide a username for the config file [current username: {}]:'.\n format(config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n print('Provide a project for the config file [current project: {}]:'.\n format(config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n cfgfile = open('.osfcli.config', 'w')\n config_.write(cfgfile)\n cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n with 
tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n with open(path, 'wb') as f:\n file_.write_to(f)\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit('Local file %s already exists, not overwriting.' % local_path)\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n osf = _setup_osf(args)\n project = osf.project(args.project)\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print('Local file %s already matches remote.' % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. 
If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To upload a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\n 'Expected source ({}) to be a directory when using recursive mode.'\n .format(args.source))\n _, dir_name = os.path.split(args.source)\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force, update=\n args.update)\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force, update=\n args.update)\n\n\n@might_need_auth\ndef remove(args):\n \"\"\"Remove a file from the project's storage.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit(\n 'To remove a file you need to provide a username and password.')\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.target)\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()\n",
"step-5": "\"\"\"Command line interface to the OSF\n\nThese functions implement the functionality of the command-line interface.\n\"\"\"\nfrom __future__ import print_function\n\nfrom functools import wraps\nimport getpass\nimport os\nimport sys\n\nfrom six.moves import configparser\nfrom six.moves import input\n\nfrom tqdm import tqdm\n\nfrom .api import OSF\nfrom .exceptions import UnauthorizedException\nfrom .utils import norm_remote_path, split_storage, makedirs, checksum\n\n\ndef config_from_file():\n if os.path.exists(\".osfcli.config\"):\n config_ = configparser.ConfigParser()\n config_.read(\".osfcli.config\")\n\n # for python2 compatibility\n config = dict(config_.items('osf'))\n\n else:\n config = {}\n\n return config\n\n\ndef config_from_env(config):\n username = os.getenv(\"OSF_USERNAME\")\n if username is not None:\n config['username'] = username\n\n project = os.getenv(\"OSF_PROJECT\")\n if project is not None:\n config['project'] = project\n\n return config\n\n\ndef _get_username(args, config):\n if args.username is None:\n username = config.get('username')\n else:\n username = args.username\n return username\n\n\ndef _setup_osf(args):\n # Command line options have precedence over environment variables,\n # which have precedence over the config file.\n config = config_from_env(config_from_file())\n\n username = _get_username(args, config)\n\n project = config.get('project')\n if args.project is None:\n args.project = project\n # still None? We are in trouble\n if args.project is None:\n sys.exit('You have to specify a project ID via the command line,'\n ' configuration file or environment variable.')\n\n password = None\n if username is not None:\n password = os.getenv(\"OSF_PASSWORD\")\n\n # Prompt user when password is not set\n if password is None:\n password = getpass.getpass('Please input your password: ')\n\n return OSF(username=username, password=password)\n\n\ndef might_need_auth(f):\n \"\"\"Decorate a CLI function that might require authentication.\n\n Catches any UnauthorizedException raised, prints a helpful message and\n then exits.\n \"\"\"\n @wraps(f)\n def wrapper(cli_args):\n try:\n return_value = f(cli_args)\n except UnauthorizedException as e:\n config = config_from_env(config_from_file())\n username = _get_username(cli_args, config)\n\n if username is None:\n sys.exit(\"Please set a username (run `osf -h` for details).\")\n else:\n sys.exit(\"You are not authorized to access this project.\")\n\n return return_value\n\n return wrapper\n\n\ndef init(args):\n \"\"\"Initialize or edit an existing .osfcli.config file.\"\"\"\n # reading existing config file, convert to configparser object\n config = config_from_file()\n config_ = configparser.ConfigParser()\n config_.add_section('osf')\n if 'username' not in config.keys():\n config_.set('osf', 'username', '')\n else:\n config_.set('osf', 'username', config['username'])\n if 'project' not in config.keys():\n config_.set('osf', 'project', '')\n else:\n config_.set('osf', 'project', config['project'])\n\n # now we can start asking for new values\n print('Provide a username for the config file [current username: {}]:'.format(\n config_.get('osf', 'username')))\n username = input()\n if username:\n config_.set('osf', 'username', username)\n\n print('Provide a project for the config file [current project: {}]:'.format(\n config_.get('osf', 'project')))\n project = input()\n if project:\n config_.set('osf', 'project', project)\n\n cfgfile = open(\".osfcli.config\", \"w\")\n config_.write(cfgfile)\n 
cfgfile.close()\n\n\n@might_need_auth\ndef clone(args):\n \"\"\"Copy all files from all storages of a project.\n\n The output directory defaults to the current directory.\n\n If the project is private you need to specify a username.\n\n If args.update is True, overwrite any existing local files only if local and\n remote files differ.\n \"\"\"\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n\n with open(path, \"wb\") as f:\n file_.write_to(f)\n\n pbar.update()\n\n\n@might_need_auth\ndef fetch(args):\n \"\"\"Fetch an individual file from a project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n The local path defaults to the name of the remote file.\n\n If the project is private you need to specify a username.\n\n If args.force is True, write local file even if that file already exists.\n If args.force is False but args.update is True, overwrite an existing local\n file only if local and remote files differ.\n \"\"\"\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break\n\n\n@might_need_auth\ndef list_(args):\n \"\"\"List all files from all storages for project.\n\n If the project is private you need to specify a username.\n \"\"\"\n osf = _setup_osf(args)\n\n project = osf.project(args.project)\n\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n print(os.path.join(prefix, path))\n\n\n@might_need_auth\ndef upload(args):\n \"\"\"Upload a new file to an existing project.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n\n If the project is private you need to specify a username.\n\n To upload a whole directory (and all its sub-directories) use the `-r`\n command-line option. If your source directory name ends in a / then\n files will be created directly in the remote directory. 
If it does not\n end in a slash an extra sub-directory with the name of the local directory\n will be created.\n\n To place contents of local directory `foo` in remote directory `bar/foo`:\n $ osf upload -r foo bar\n To place contents of local directory `foo` in remote directory `bar`:\n $ osf upload -r foo/ bar\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To upload a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\"Expected source ({}) to be a directory when \"\n \"using recursive mode.\".format(args.source))\n\n # local name of the directory that is being uploaded\n _, dir_name = os.path.split(args.source)\n\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n # build the remote path + fname\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force,\n update=args.update)\n\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force,\n update=args.update)\n\n\n@might_need_auth\ndef remove(args):\n \"\"\"Remove a file from the project's storage.\n\n The first part of the remote path is interpreted as the name of the\n storage provider. If there is no match the default (osfstorage) is\n used.\n \"\"\"\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To remove a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n\n storage, remote_path = split_storage(args.target)\n\n store = project.storage(storage)\n for f in store.files:\n if norm_remote_path(f.path) == remote_path:\n f.remove()\n",
"step-ids": [
7,
9,
10,
12,
13
]
}
|
[
7,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
length1 = len(s)
length2 = len(t)
if length1 != length2:
return False
s = sorted(s)
t = sorted(t)
for i in range(0, length1):
if s[i] != t[i]:
return False
return True
<|reserved_special_token_1|>
"""
LeetCode Problem: 242. Valid Anagram
Link: https://leetcode.com/problems/valid-anagram/
Written by: Mostofa Adib Shakib
Language: Python
"""
class Solution(object):
def isAnagram(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
length1 = len(s)
length2 = len(t)
if length1 != length2:
return False
s = sorted(s) #sorted the string in alphanumeric order
t = sorted(t) #sorted the string in alphanumeric order
for i in range(0, length1):
if s[i] != t[i]:
return False # return false if the two sorted strings are not the same.
return True # if the sorted strings are same return True
|
flexible
|
{
"blob_id": "a4f932a8566afe0265dc1057d0f6534a608697f7",
"index": 365,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n length1 = len(s)\n length2 = len(t)\n if length1 != length2:\n return False\n s = sorted(s)\n t = sorted(t)\n for i in range(0, length1):\n if s[i] != t[i]:\n return False\n return True\n",
"step-4": "\"\"\"\nLeetCode Problem: 242. Valid Anagram\nLink: https://leetcode.com/problems/valid-anagram/\nWritten by: Mostofa Adib Shakib\nLanguage: Python\n\"\"\"\n\nclass Solution(object):\n def isAnagram(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: bool\n \"\"\"\n \n length1 = len(s)\n length2 = len(t)\n \n if length1 != length2:\n return False\n \n s = sorted(s) #sorted the string in alphanumeric order\n t = sorted(t) #sorted the string in alphanumeric order\n \n for i in range(0, length1):\n if s[i] != t[i]:\n return False # return false if the two sorted strings are not the same.\n\n return True # if the sorted strings are same return True",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sort_data(data_list):
x_list = []
y_list = []
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array = np.array(x_list)
y_array = np.array(y_list)
return x_array, y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output = func(*args, **kw)
time_cost = time.time() - local_time
print('{} run time is {}'.format(func.__name__, time_cost))
with open('./cluster/tmp.csv', 'a+') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__, output, time_cost])
return output, time_cost
return wrapper
@print_run_time
def kmeans(train_x, train_y, num_cluster=5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)
return fmi
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
@print_run_time
def AC(train_x, train_y, num_cluster=5):
ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
ac.fit(train_x)
predicted_labels = ac.fit_predict(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)
return fmi
@print_run_time
@print_run_time
def S_C(train_x, train_y, num_cluster=5):
sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)
return fmi
@print_run_time
def MBK(train_x, train_y, num_cluster=5):
mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)
return fmi
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('./feature/')
<|reserved_special_token_0|>
def sort_data(data_list):
x_list = []
y_list = []
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array = np.array(x_list)
y_array = np.array(y_list)
return x_array, y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output = func(*args, **kw)
time_cost = time.time() - local_time
print('{} run time is {}'.format(func.__name__, time_cost))
with open('./cluster/tmp.csv', 'a+') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__, output, time_cost])
return output, time_cost
return wrapper
@print_run_time
def kmeans(train_x, train_y, num_cluster=5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)
return fmi
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
@print_run_time
def AC(train_x, train_y, num_cluster=5):
ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
ac.fit(train_x)
predicted_labels = ac.fit_predict(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)
return fmi
@print_run_time
@print_run_time
def S_C(train_x, train_y, num_cluster=5):
sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)
return fmi
@print_run_time
def MBK(train_x, train_y, num_cluster=5):
mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)
return fmi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',
help='')
parser.add_argument('-t', '--task', type=int, default=0, help='')
parser.add_argument('-f', '--feature_type', type=int, default=0, help='')
parser.add_argument('-l', '--label_type', type=int, default=2, help='')
parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')
parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3
), help='')
parser.add_argument('-m', '--method', type=str, default='all', choices=
['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',
'all'], help='')
parser.add_argument('-sp', '--save_path', type=str, default=
'./cluster/result.csv', help='')
args = parser.parse_args()
training_set, validation_set, test_set = fe.get_datasets(dataset=args.
dataset, task=args.task, feature_type=args.feature_type, label_type
=args.label_type, shuffle=args.shuffle, proportion=args.proportion)
train_x, train_y = sort_data(training_set)
val_x, val_y = sort_data(validation_set)
with open('./cluster/tmp.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['method', 'index', 'time_cost'])
if args.method == 'kmeans':
acc = kmeans(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'dbscan':
acc = dbscan(train_x, train_y)
elif args.method == 'AC':
acc = AC(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'AP':
acc = AP(train_x, train_y)
elif args.method == 'meanshift':
acc = meanshift(train_x, train_y)
elif args.method == 'S_C':
acc = S_C(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'FA':
acc = FA(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'MBK':
acc = MBK(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'all':
acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))
acc_ac = AC(train_x, train_y, len(np.unique(train_y)))
acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))
acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))
acc_db = dbscan(train_x, train_y)
tmp_path = os.path.abspath('./cluster/tmp.csv')
os.rename('./cluster/tmp.csv', args.save_path)
<|reserved_special_token_1|>
import numpy as np
import sklearn.cluster as cluster
import os
import time
import argparse
import csv
from sklearn import metrics
import sys
sys.path.append('./feature/')
import feature_extraction as fe
def sort_data(data_list):
x_list = []
y_list = []
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array = np.array(x_list)
y_array = np.array(y_list)
return x_array, y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output = func(*args, **kw)
time_cost = time.time() - local_time
print('{} run time is {}'.format(func.__name__, time_cost))
with open('./cluster/tmp.csv', 'a+') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__, output, time_cost])
return output, time_cost
return wrapper
@print_run_time
def kmeans(train_x, train_y, num_cluster=5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)
return fmi
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
@print_run_time
def AC(train_x, train_y, num_cluster=5):
ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
ac.fit(train_x)
predicted_labels = ac.fit_predict(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)
return fmi
@print_run_time
@print_run_time
def S_C(train_x, train_y, num_cluster=5):
sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)
return fmi
@print_run_time
def MBK(train_x, train_y, num_cluster=5):
mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)
return fmi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',
help='')
parser.add_argument('-t', '--task', type=int, default=0, help='')
parser.add_argument('-f', '--feature_type', type=int, default=0, help='')
parser.add_argument('-l', '--label_type', type=int, default=2, help='')
parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')
parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3
), help='')
parser.add_argument('-m', '--method', type=str, default='all', choices=
['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',
'all'], help='')
parser.add_argument('-sp', '--save_path', type=str, default=
'./cluster/result.csv', help='')
args = parser.parse_args()
training_set, validation_set, test_set = fe.get_datasets(dataset=args.
dataset, task=args.task, feature_type=args.feature_type, label_type
=args.label_type, shuffle=args.shuffle, proportion=args.proportion)
train_x, train_y = sort_data(training_set)
val_x, val_y = sort_data(validation_set)
with open('./cluster/tmp.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['method', 'index', 'time_cost'])
if args.method == 'kmeans':
acc = kmeans(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'dbscan':
acc = dbscan(train_x, train_y)
elif args.method == 'AC':
acc = AC(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'AP':
acc = AP(train_x, train_y)
elif args.method == 'meanshift':
acc = meanshift(train_x, train_y)
elif args.method == 'S_C':
acc = S_C(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'FA':
acc = FA(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'MBK':
acc = MBK(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'all':
acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))
acc_ac = AC(train_x, train_y, len(np.unique(train_y)))
acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))
acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))
acc_db = dbscan(train_x, train_y)
tmp_path = os.path.abspath('./cluster/tmp.csv')
os.rename('./cluster/tmp.csv', args.save_path)
<|reserved_special_token_1|>
# Clustering algorithms:
    # kmeans
    # density-based clustering: DBSCAN
    # hierarchical clustering: AgglomerativeClustering
    # spectral clustering: SpectralClustering
    # mini-batch kmeans: MiniBatchKMeans
# Evaluation metric: FMI (Fowlkes–Mallows index)
# Excluded: feature clustering: FeatureAgglomeration; affinity propagation (AP) clustering: affinity_propagation; mean-shift: MeanShift
import numpy as np
import sklearn.cluster as cluster
import os
import time
import argparse
import csv
from sklearn import metrics
import sys
sys.path.append('./feature/')
import feature_extraction as fe
def sort_data(data_list):
x_list=[]
y_list=[]
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array=np.array(x_list)
y_array=np.array(y_list)
return x_array,y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output=func(*args, **kw)
time_cost=time.time() - local_time
print('{} run time is {}'.format(func.__name__,time_cost))
with open("./cluster/tmp.csv","a+") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__,output,time_cost])
return output,time_cost
return wrapper
@print_run_time
def kmeans (train_x,train_y,num_cluster = 5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
    # FMI index: compare the clustering against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,km_cluster.labels_)
    # print("FMI score of kmeans: %f"%(fmi))
return fmi
@print_run_time
def dbscan(train_x,train_y):
    # density-based clustering
db = cluster.DBSCAN(eps=0.2,min_samples=3)
db.fit(train_x)
    # FMI index: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,db.labels_)
return fmi
@print_run_time
def AC(train_x,train_y,num_cluster = 5):
    # hierarchical (agglomerative) clustering
ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
ac.fit(train_x)
predicted_labels = ac.fit_predict(train_x)
    # # compute the ARI index
# ARI = (metrics.adjusted_rand_score(train_y, predicted_labels))
    # FMI index: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,ac.labels_)
return fmi
# @print_run_time
# def AP(train_x,train_y):
#    # affinity propagation (AP) clustering
# ap = cluster.affinity_propagation(preference=-50).fit(train_x)
#    # FMI index: compare against the ground-truth labels
# fmi = metrics.fowlkes_mallows_score(train_y,ap.labels_)
# return fmi
# @print_run_time
# def meanshift(train_x,train_y):
#    # mean-shift clustering (MeanShift)
# ms = cluster.MeanShift(bandwidth=2).fit(train_x)
#    # FMI index: compare against the ground-truth labels
# fmi = metrics.fowlkes_mallows_score(train_y,ms.labels_)
# return fmi
@print_run_time
def S_C(train_x,train_y,num_cluster = 5):
    # spectral clustering
sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
    # FMI index: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,sc.labels_)
return fmi
# @print_run_time
# def FA(train_x,train_y,num_cluster = 5):
#    # feature clustering (FeatureAgglomeration)
# fa = cluster.FeatureAgglomeration(n_clusters=num_cluster).fit(train_x)
#    # FMI index: compare against the ground-truth labels
# fmi = metrics.fowlkes_mallows_score(train_y,fa.labels_)
# return fmi
@print_run_time
def MBK(train_x,train_y,num_cluster = 5):
    # mini-batch kmeans
mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
    # FMI index: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,mbk.labels_)
return fmi
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument("-d", "--dataset", type=str, default="cit-HepPh", help="")
parser.add_argument("-t", "--task", type=int, default=0, help="")
parser.add_argument("-f", "--feature_type", type=int, default=0, help="")
parser.add_argument("-l", "--label_type", type=int, default=2, help="")
parser.add_argument("-s", "--shuffle", type=bool, default=True, help="")
parser.add_argument("-p", "--proportion", type=tuple, default=(0.7, 0.3), help="")
parser.add_argument("-m", "--method", type=str, default='all',choices=['kmeans','dbscan','AC','AP','meanshift','S_C','FA','MBK','all'], help="")
parser.add_argument("-sp", "--save_path", type=str, default='./cluster/result.csv', help="")
args = parser.parse_args()
training_set, validation_set, test_set = fe.get_datasets(dataset=args.dataset, task=args.task,
feature_type=args.feature_type, label_type=args.label_type,
shuffle=args.shuffle, proportion=args.proportion)
train_x,train_y=sort_data(training_set)
val_x,val_y=sort_data(validation_set)
with open("./cluster/tmp.csv","w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['method','index','time_cost'])
if args.method=='kmeans':
acc = kmeans(train_x,train_y,len(np.unique(train_y)))
elif args.method=='dbscan':
acc = dbscan(train_x,train_y)
elif args.method=='AC':
acc = AC(train_x,train_y,len(np.unique(train_y)))
elif args.method=='AP':
acc = AP(train_x,train_y)
elif args.method=='meanshift':
acc = meanshift(train_x,train_y)
elif args.method=='S_C':
acc = S_C(train_x,train_y,len(np.unique(train_y)))
elif args.method=='FA':
acc = FA(train_x,train_y,len(np.unique(train_y)))
elif args.method=='MBK':
acc = MBK(train_x,train_y,len(np.unique(train_y)))
elif args.method=='all':
acc_k = kmeans(train_x,train_y,len(np.unique(train_y)))
acc_ac = AC(train_x,train_y,len(np.unique(train_y)))
acc_sc = S_C(train_x,train_y,len(np.unique(train_y)))
# acc_fa = FA(train_x,train_y,len(np.unique(train_y))) ValueError: Found input variables with inconsistent numbers of samples: [7414, 24684]
acc_mbk = MBK(train_x,train_y,len(np.unique(train_y)))
acc_db = dbscan(train_x,train_y)
# acc_ap = AP(train_x,train_y) affinity_propagation() missing 1 required positional argument: 'S'
        # acc_ms = meanshift(train_x,train_y)  # times out
tmp_path=os.path.abspath('./cluster/tmp.csv')
os.rename('./cluster/tmp.csv',args.save_path)
|
flexible
|
{
"blob_id": "aaebd9eba8a5c51c64baaf60224720b87a6364e1",
"index": 1388,
"step-1": "<mask token>\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sort_data(data_list):\n x_list = []\n y_list = []\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array = np.array(x_list)\n y_array = np.array(y_list)\n return x_array, y_array\n\n\ndef print_run_time(func):\n\n def wrapper(*args, **kw):\n local_time = time.time()\n output = func(*args, **kw)\n time_cost = time.time() - local_time\n print('{} run time is {}'.format(func.__name__, time_cost))\n with open('./cluster/tmp.csv', 'a+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([func.__name__, output, time_cost])\n return output, time_cost\n return wrapper\n\n\n@print_run_time\ndef kmeans(train_x, train_y, num_cluster=5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)\n return fmi\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n@print_run_time\ndef AC(train_x, train_y, num_cluster=5):\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)\n return fmi\n\n\n@print_run_time\n@print_run_time\ndef S_C(train_x, train_y, num_cluster=5):\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)\n return fmi\n\n\n@print_run_time\ndef MBK(train_x, train_y, num_cluster=5):\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)\n return fmi\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('./feature/')\n<mask token>\n\n\ndef sort_data(data_list):\n x_list = []\n y_list = []\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array = np.array(x_list)\n y_array = np.array(y_list)\n return x_array, y_array\n\n\ndef print_run_time(func):\n\n def wrapper(*args, **kw):\n local_time = time.time()\n output = func(*args, **kw)\n time_cost = time.time() - local_time\n print('{} run time is {}'.format(func.__name__, time_cost))\n with open('./cluster/tmp.csv', 'a+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([func.__name__, output, time_cost])\n return output, time_cost\n return wrapper\n\n\n@print_run_time\ndef kmeans(train_x, train_y, num_cluster=5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)\n return fmi\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n@print_run_time\ndef AC(train_x, train_y, num_cluster=5):\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)\n return fmi\n\n\n@print_run_time\n@print_run_time\ndef S_C(train_x, train_y, num_cluster=5):\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)\n return fmi\n\n\n@print_run_time\ndef MBK(train_x, train_y, num_cluster=5):\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)\n return fmi\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',\n help='')\n parser.add_argument('-t', '--task', type=int, default=0, help='')\n parser.add_argument('-f', '--feature_type', type=int, default=0, help='')\n parser.add_argument('-l', '--label_type', type=int, default=2, help='')\n parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')\n parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3\n ), help='')\n parser.add_argument('-m', '--method', type=str, default='all', choices=\n ['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',\n 'all'], help='')\n parser.add_argument('-sp', '--save_path', type=str, default=\n './cluster/result.csv', help='')\n args = parser.parse_args()\n training_set, validation_set, test_set = fe.get_datasets(dataset=args.\n dataset, task=args.task, feature_type=args.feature_type, label_type\n =args.label_type, shuffle=args.shuffle, proportion=args.proportion)\n train_x, train_y = sort_data(training_set)\n val_x, val_y = sort_data(validation_set)\n with open('./cluster/tmp.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['method', 'index', 'time_cost'])\n if args.method == 'kmeans':\n acc = kmeans(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'dbscan':\n acc = dbscan(train_x, train_y)\n elif args.method == 'AC':\n acc = AC(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'AP':\n acc = AP(train_x, train_y)\n elif args.method == 'meanshift':\n acc = meanshift(train_x, train_y)\n elif args.method == 'S_C':\n acc = S_C(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'FA':\n acc = FA(train_x, 
train_y, len(np.unique(train_y)))\n elif args.method == 'MBK':\n acc = MBK(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'all':\n acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))\n acc_ac = AC(train_x, train_y, len(np.unique(train_y)))\n acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))\n acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))\n acc_db = dbscan(train_x, train_y)\n tmp_path = os.path.abspath('./cluster/tmp.csv')\n os.rename('./cluster/tmp.csv', args.save_path)\n",
"step-4": "import numpy as np\nimport sklearn.cluster as cluster\nimport os\nimport time\nimport argparse\nimport csv\nfrom sklearn import metrics\nimport sys\nsys.path.append('./feature/')\nimport feature_extraction as fe\n\n\ndef sort_data(data_list):\n x_list = []\n y_list = []\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array = np.array(x_list)\n y_array = np.array(y_list)\n return x_array, y_array\n\n\ndef print_run_time(func):\n\n def wrapper(*args, **kw):\n local_time = time.time()\n output = func(*args, **kw)\n time_cost = time.time() - local_time\n print('{} run time is {}'.format(func.__name__, time_cost))\n with open('./cluster/tmp.csv', 'a+') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([func.__name__, output, time_cost])\n return output, time_cost\n return wrapper\n\n\n@print_run_time\ndef kmeans(train_x, train_y, num_cluster=5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)\n return fmi\n\n\n@print_run_time\ndef dbscan(train_x, train_y):\n db = cluster.DBSCAN(eps=0.2, min_samples=3)\n db.fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)\n return fmi\n\n\n@print_run_time\ndef AC(train_x, train_y, num_cluster=5):\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)\n return fmi\n\n\n@print_run_time\n@print_run_time\ndef S_C(train_x, train_y, num_cluster=5):\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)\n return fmi\n\n\n@print_run_time\ndef MBK(train_x, train_y, num_cluster=5):\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)\n return fmi\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',\n help='')\n parser.add_argument('-t', '--task', type=int, default=0, help='')\n parser.add_argument('-f', '--feature_type', type=int, default=0, help='')\n parser.add_argument('-l', '--label_type', type=int, default=2, help='')\n parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')\n parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3\n ), help='')\n parser.add_argument('-m', '--method', type=str, default='all', choices=\n ['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',\n 'all'], help='')\n parser.add_argument('-sp', '--save_path', type=str, default=\n './cluster/result.csv', help='')\n args = parser.parse_args()\n training_set, validation_set, test_set = fe.get_datasets(dataset=args.\n dataset, task=args.task, feature_type=args.feature_type, label_type\n =args.label_type, shuffle=args.shuffle, proportion=args.proportion)\n train_x, train_y = sort_data(training_set)\n val_x, val_y = sort_data(validation_set)\n with open('./cluster/tmp.csv', 'w') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(['method', 'index', 'time_cost'])\n if args.method == 'kmeans':\n acc = kmeans(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'dbscan':\n acc = dbscan(train_x, train_y)\n elif args.method == 'AC':\n acc = AC(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'AP':\n acc = AP(train_x, train_y)\n elif args.method == 'meanshift':\n acc = 
meanshift(train_x, train_y)\n elif args.method == 'S_C':\n acc = S_C(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'FA':\n acc = FA(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'MBK':\n acc = MBK(train_x, train_y, len(np.unique(train_y)))\n elif args.method == 'all':\n acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))\n acc_ac = AC(train_x, train_y, len(np.unique(train_y)))\n acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))\n acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))\n acc_db = dbscan(train_x, train_y)\n tmp_path = os.path.abspath('./cluster/tmp.csv')\n os.rename('./cluster/tmp.csv', args.save_path)\n",
"step-5": "#聚类算法:\n # kmeans\n # 密度聚类:DBSCAN\n # 层次聚类:AgglomerativeClustering \n # 谱聚类:SpectralClustering\n # 分批kmeans:MiniBatchKMeans\n# 评价指标:FMI(Fowlkes–Mallows index)\n# 排除:特征聚类:FeatureAgglomeration# 亲和传播聚类(AP)聚类:affinity_propagation# 偏移均值向量:MeanShift\nimport numpy as np\nimport sklearn.cluster as cluster\nimport os\nimport time\nimport argparse\nimport csv\nfrom sklearn import metrics\nimport sys\nsys.path.append('./feature/')\nimport feature_extraction as fe\n\ndef sort_data(data_list):\n x_list=[]\n y_list=[]\n for data in data_list:\n x_list.append(data[0])\n y_list.append(data[1])\n x_array=np.array(x_list)\n y_array=np.array(y_list)\n return x_array,y_array\n\ndef print_run_time(func): \n def wrapper(*args, **kw): \n local_time = time.time() \n output=func(*args, **kw)\n time_cost=time.time() - local_time\n print('{} run time is {}'.format(func.__name__,time_cost))\n with open(\"./cluster/tmp.csv\",\"a+\") as csvfile: \n writer = csv.writer(csvfile)\n writer.writerow([func.__name__,output,time_cost])\n return output,time_cost\n return wrapper\n\n@print_run_time\ndef kmeans (train_x,train_y,num_cluster = 5):\n km_cluster = cluster.KMeans(n_clusters=num_cluster)\n km_cluster.fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,km_cluster.labels_)\n # print(\"kmeans的FMI评价分值为:%f\"%(fmi))\n return fmi\n\n@print_run_time\ndef dbscan(train_x,train_y):\n # 密度聚类\n db = cluster.DBSCAN(eps=0.2,min_samples=3)\n db.fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,db.labels_)\n return fmi\n\n@print_run_time\ndef AC(train_x,train_y,num_cluster = 5):\n # 层次聚类\n ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)\n ac.fit(train_x)\n predicted_labels = ac.fit_predict(train_x)\n # #计算ARI指数\n # ARI = (metrics.adjusted_rand_score(train_y, predicted_labels))\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,ac.labels_)\n \n return fmi\n@print_run_time\n# def AP(train_x,train_y):\n# #亲和传播聚类(AP)聚类\n# ap = cluster.affinity_propagation(preference=-50).fit(train_x)\n\n# #FMI指数:与真实值对比\n# fmi = metrics.fowlkes_mallows_score(train_y,ap.labels_)\n# return fmi \n\n# @print_run_time\n# def meanshift(train_x,train_y):\n# #偏移均值向量(meanshift)\n# ms = cluster.MeanShift(bandwidth=2).fit(train_x)\n\n# #FMI指数:与真实值对比\n# fmi = metrics.fowlkes_mallows_score(train_y,ms.labels_)\n# return fmi\n\n@print_run_time\ndef S_C(train_x,train_y,num_cluster = 5):\n #谱聚类\n sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,sc.labels_)\n return fmi\n\n# @print_run_time\n# def FA(train_x,train_y,num_cluster = 5):\n# #特征聚类\n# fa = cluster.FeatureAgglomeration(n_clusters=num_cluster).fit(train_x)\n\n# #FMI指数:与真实值对比\n# fmi = metrics.fowlkes_mallows_score(train_y,fa.labels_)\n# return fmi\n\n@print_run_time\ndef MBK(train_x,train_y,num_cluster = 5):\n #分批kmeans\n mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)\n\n #FMI指数:与真实值对比\n fmi = metrics.fowlkes_mallows_score(train_y,mbk.labels_)\n return fmi\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\"-d\", \"--dataset\", type=str, default=\"cit-HepPh\", help=\"\")\n parser.add_argument(\"-t\", \"--task\", type=int, default=0, help=\"\")\n parser.add_argument(\"-f\", \"--feature_type\", type=int, default=0, help=\"\")\n parser.add_argument(\"-l\", \"--label_type\", type=int, default=2, help=\"\")\n parser.add_argument(\"-s\", \"--shuffle\", type=bool, default=True, 
help=\"\")\n parser.add_argument(\"-p\", \"--proportion\", type=tuple, default=(0.7, 0.3), help=\"\")\n parser.add_argument(\"-m\", \"--method\", type=str, default='all',choices=['kmeans','dbscan','AC','AP','meanshift','S_C','FA','MBK','all'], help=\"\")\n parser.add_argument(\"-sp\", \"--save_path\", type=str, default='./cluster/result.csv', help=\"\") \n args = parser.parse_args()\n\n training_set, validation_set, test_set = fe.get_datasets(dataset=args.dataset, task=args.task,\n feature_type=args.feature_type, label_type=args.label_type,\n shuffle=args.shuffle, proportion=args.proportion)\n train_x,train_y=sort_data(training_set)\n val_x,val_y=sort_data(validation_set)\n\n with open(\"./cluster/tmp.csv\",\"w\") as csvfile: \n writer = csv.writer(csvfile)\n writer.writerow(['method','index','time_cost'])\n\n if args.method=='kmeans':\n acc = kmeans(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='dbscan':\n acc = dbscan(train_x,train_y)\n elif args.method=='AC':\n acc = AC(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='AP':\n acc = AP(train_x,train_y)\n elif args.method=='meanshift':\n acc = meanshift(train_x,train_y)\n elif args.method=='S_C':\n acc = S_C(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='FA':\n acc = FA(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='MBK':\n acc = MBK(train_x,train_y,len(np.unique(train_y)))\n elif args.method=='all':\n acc_k = kmeans(train_x,train_y,len(np.unique(train_y)))\n acc_ac = AC(train_x,train_y,len(np.unique(train_y)))\n acc_sc = S_C(train_x,train_y,len(np.unique(train_y)))\n # acc_fa = FA(train_x,train_y,len(np.unique(train_y))) ValueError: Found input variables with inconsistent numbers of samples: [7414, 24684]\n acc_mbk = MBK(train_x,train_y,len(np.unique(train_y)))\n acc_db = dbscan(train_x,train_y)\n # acc_ap = AP(train_x,train_y) affinity_propagation() missing 1 required positional argument: 'S'\n # acc_ms = meanshift(train_x,train_y) timesout\n \n\n tmp_path=os.path.abspath('./cluster/tmp.csv')\n os.rename('./cluster/tmp.csv',args.save_path)\n",
"step-ids": [
1,
7,
8,
9,
10
]
}
|
[
1,
7,
8,
9,
10
] |
from slacker import Slacker
import vk_api
import time
import logging
from settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL
slack = Slacker(SLACK_TOKEN)
class Vts:
def __init__(self):
self.last_comment_id = 0
self.vk = None
def update_vk(self):
if self.vk is not None:
return
vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)
try:
vk_session.authorization()
except vk_api.AuthorizationError as error_msg:
logging.error(error_msg)
return
except vk_api.Captcha as captcha:
logging.error(captcha)
return
self.vk = vk_session.get_api()
def update_last_comment_id(self):
self.update_vk()
if self.vk is None:
return
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID, sort='desc', count=1)
if response['count'] == 0:
time.sleep(5)
return
self.last_comment_id = response['items'][0]['id']
print('Set initial id to ' + str(self.last_comment_id))
def get_comments(self):
self.update_vk()
if self.vk is None:
return [], []
response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID,
start_comment_id=self.last_comment_id, extended=1)
return response['items'], response['profiles']
def get_topic(self):
self.update_vk()
response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[TOPIC_ID])
if response['count'] == 0:
return None
return response['items'][0]
def run(self):
while True:
if self.last_comment_id == 0:
self.update_last_comment_id()
topic = self.get_topic()
if topic is None:
logging.warning('Topic not found')
time.sleep(60)
continue
comments, profiles = self.get_comments()
if len(comments) == 0:
time.sleep(5)
continue
users = dict()
for profile in profiles:
users[profile['id']] = profile
for comment in comments:
id = comment['id']
if id > self.last_comment_id:
self.last_comment_id = id
text = comment['text']
title = topic['title']
user_id = abs(comment['from_id'])
try:
user = users[user_id]
username = ' '.join([user['first_name'], user['last_name']])
except KeyError:
username = ''
date = comment['date']
message_date = '<!date^' + str(date) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'
text = "\n".join(map(lambda s: ">" + s, text.split("\n")))
message = '>*' + title + '*\n>_' + username + '_ (' + message_date + ')\n' + text
slack.chat.post_message(channel='#random', text=message, username='vts', icon_url=ICON_URL)
logging.info('Posted comment_id=%s\n%s', id, message)
if __name__ == '__main__':
vts = Vts()
try:
while True:
try:
vts.run()
except vk_api.requests.exceptions.ConnectionError:
time.sleep(10)
except KeyboardInterrupt:
pass
|
normal
|
{
"blob_id": "885e02cbf78412d77bd17eba64a8a1a52aaed0df",
"index": 5837,
"step-1": "<mask token>\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-3": "<mask token>\nslack = Slacker(SLACK_TOKEN)\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-4": "from slacker import Slacker\nimport vk_api\nimport time\nimport logging\nfrom settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL\nslack = Slacker(SLACK_TOKEN)\n\n\nclass Vts:\n\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n if self.vk is None:\n return\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n if self.vk is None:\n return [], []\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=\n TOPIC_ID, start_comment_id=self.last_comment_id, extended=1)\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[\n TOPIC_ID])\n if response['count'] == 0:\n return None\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n comments, profiles = self.get_comments()\n if len(comments) == 0:\n time.sleep(5)\n continue\n users = dict()\n for profile in profiles:\n users[profile['id']] = profile\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user[\n 'last_name']])\n except KeyError:\n username = ''\n date = comment['date']\n message_date = '<!date^' + str(date\n ) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = '\\n'.join(map(lambda s: '>' + s, text.split('\\n')))\n message = ('>*' + title + '*\\n>_' + username + '_ (' +\n message_date + ')\\n' + text)\n slack.chat.post_message(channel='#random', text=message,\n username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-5": "from slacker import Slacker\n\nimport vk_api\n\nimport time\n\nimport logging\n\nfrom settings import SLACK_TOKEN, VK_LOGIN, VK_PASSWORD, GROUP_ID, TOPIC_ID, ICON_URL\n\nslack = Slacker(SLACK_TOKEN)\n\n\nclass Vts:\n def __init__(self):\n self.last_comment_id = 0\n self.vk = None\n\n def update_vk(self):\n if self.vk is not None:\n return\n\n vk_session = vk_api.VkApi(VK_LOGIN, VK_PASSWORD)\n\n try:\n vk_session.authorization()\n except vk_api.AuthorizationError as error_msg:\n logging.error(error_msg)\n return\n except vk_api.Captcha as captcha:\n logging.error(captcha)\n return\n\n self.vk = vk_session.get_api()\n\n def update_last_comment_id(self):\n self.update_vk()\n\n if self.vk is None:\n return\n\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID, sort='desc', count=1)\n if response['count'] == 0:\n time.sleep(5)\n return\n self.last_comment_id = response['items'][0]['id']\n print('Set initial id to ' + str(self.last_comment_id))\n\n def get_comments(self):\n self.update_vk()\n\n if self.vk is None:\n return [], []\n\n response = self.vk.board.getComments(group_id=GROUP_ID, topic_id=TOPIC_ID,\n start_comment_id=self.last_comment_id, extended=1)\n\n return response['items'], response['profiles']\n\n def get_topic(self):\n self.update_vk()\n\n response = self.vk.board.getTopics(group_id=GROUP_ID, topic_ids=[TOPIC_ID])\n if response['count'] == 0:\n return None\n\n return response['items'][0]\n\n def run(self):\n while True:\n if self.last_comment_id == 0:\n self.update_last_comment_id()\n\n topic = self.get_topic()\n if topic is None:\n logging.warning('Topic not found')\n time.sleep(60)\n continue\n\n comments, profiles = self.get_comments()\n\n if len(comments) == 0:\n time.sleep(5)\n continue\n\n users = dict()\n\n for profile in profiles:\n users[profile['id']] = profile\n\n for comment in comments:\n id = comment['id']\n if id > self.last_comment_id:\n self.last_comment_id = id\n text = comment['text']\n title = topic['title']\n user_id = abs(comment['from_id'])\n try:\n user = users[user_id]\n username = ' '.join([user['first_name'], user['last_name']])\n except KeyError:\n username = ''\n\n date = comment['date']\n message_date = '<!date^' + str(date) + '^Posted {date} {time}|Posted 2014-02-18 6:39:42>'\n text = \"\\n\".join(map(lambda s: \">\" + s, text.split(\"\\n\")))\n message = '>*' + title + '*\\n>_' + username + '_ (' + message_date + ')\\n' + text\n slack.chat.post_message(channel='#random', text=message, username='vts', icon_url=ICON_URL)\n logging.info('Posted comment_id=%s\\n%s', id, message)\n\nif __name__ == '__main__':\n vts = Vts()\n try:\n while True:\n try:\n vts.run()\n except vk_api.requests.exceptions.ConnectionError:\n time.sleep(10)\n except KeyboardInterrupt:\n pass\n",
"step-ids": [
7,
8,
9,
10,
11
]
}
|
[
7,
8,
9,
10,
11
] |
# Generated by Django 3.0.5 on 2020-04-30 06:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('products_app', '0003_auto_20200429_0739'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('email', models.EmailField(max_length=254)),
],
),
migrations.RemoveField(
model_name='item',
name='stock',
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('items', models.ManyToManyField(to='products_app.Item')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products_app.User')),
],
),
]
|
normal
|
{
"blob_id": "cdc8c8aba384b7b1b5e741ffe4309eaee30aaada",
"index": 5405,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('products_app', '0003_auto_20200429_0739')]\n operations = [migrations.CreateModel(name='User', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('name', models.CharField(max_length=100)), (\n 'email', models.EmailField(max_length=254))]), migrations.\n RemoveField(model_name='item', name='stock'), migrations.\n CreateModel(name='Order', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('items', models.ManyToManyField(to='products_app.Item')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='products_app.User'))])]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('products_app', '0003_auto_20200429_0739')]\n operations = [migrations.CreateModel(name='User', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('name', models.CharField(max_length=100)), (\n 'email', models.EmailField(max_length=254))]), migrations.\n RemoveField(model_name='item', name='stock'), migrations.\n CreateModel(name='Order', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('items', models.ManyToManyField(to='products_app.Item')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='products_app.User'))])]\n",
"step-5": "# Generated by Django 3.0.5 on 2020-04-30 06:26\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('products_app', '0003_auto_20200429_0739'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=100)),\n ('email', models.EmailField(max_length=254)),\n ],\n ),\n migrations.RemoveField(\n model_name='item',\n name='stock',\n ),\n migrations.CreateModel(\n name='Order',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('items', models.ManyToManyField(to='products_app.Item')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products_app.User')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from collections import deque
s = list(input().upper())
new = list(set(s)) # count over the deduplicated letters to avoid a time limit exceeded
n = {}
for i in new:
n[i] = s.count(i)
cnt = deque()
for k, v in n.items():
cnt.append(v)
if cnt.count(max(cnt)) >1:
print('?')
else:
print(max(n, key=n.get))
|
normal
|
{
"blob_id": "5dcb20f52b5041d5f9ea028b383e0f2f10104af9",
"index": 9486,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in new:\n n[i] = s.count(i)\n<mask token>\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n",
"step-3": "<mask token>\ns = list(input().upper())\nnew = list(set(s))\nn = {}\nfor i in new:\n n[i] = s.count(i)\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n",
"step-4": "from collections import deque\ns = list(input().upper())\nnew = list(set(s))\nn = {}\nfor i in new:\n n[i] = s.count(i)\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n",
"step-5": "from collections import deque\ns = list(input().upper())\nnew = list(set(s)) # 중복 제거 한 알파벳 리스트로 카운트 해줘야 시간초과 안남\nn = {}\nfor i in new:\n n[i] = s.count(i)\n\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\n\nif cnt.count(max(cnt)) >1:\n print('?')\nelse:\n print(max(n, key=n.get))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class BaseTask(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, task_model=None, task_data=None, action_data=None):
self._config = None
self.logger = getLogger('adjutant')
if task_model:
self.task = task_model
self._refresh_actions()
else:
action_serializer_list = self._instantiate_action_serializers(
action_data)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
keystone_user = task_data.get('keystone_user', {})
self.task = Task.objects.create(keystone_user=keystone_user,
project_id=keystone_user.get('project_id'), task_type=self.
task_type, hash_key=hash_key)
self.task.save()
self.actions = []
for i, action in enumerate(action_serializer_list):
data = action['serializer'].validated_data
self.actions.append(action['action'](data=data, task=self.
task, order=i))
self.logger.info("(%s) - '%s' task created (%s)." % (timezone.
now(), self.task_type, self.task.uuid))
<|reserved_special_token_0|>
def _create_task_hash(self, action_list):
hashable_list = [self.task_type]
for action in action_list:
hashable_list.append(action['name'])
if not action['serializer']:
continue
fields = sorted(action['serializer'].validated_data.keys())
for field in fields:
try:
hashable_list.append(action['serializer'].
validated_data[field])
except KeyError:
if field == 'username' and CONF.identity.username_is_email:
continue
else:
raise
return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()
def _handle_duplicates(self, hash_key):
duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=
0, cancelled=0)
if not duplicate_tasks:
return
if self.duplicate_policy == 'cancel':
now = timezone.now()
self.logger.info(
'(%s) - Task is a duplicate - Cancelling old tasks.' % now)
for task in duplicate_tasks:
task.add_task_note(
'Task cancelled because was an old duplicate. - (%s)' % now
)
task.get_task().cancel()
return
raise exceptions.TaskDuplicateFound()
def _refresh_actions(self):
self.actions = [a.get_action() for a in self.task.actions]
def _create_token(self):
self.clear_tokens()
token_expiry = self.config.token_expiry or self.token_expiry
token = create_token(self.task, token_expiry)
self.add_note('Token created for task.')
try:
email_conf = self.config.emails.token
send_stage_email(self.task, email_conf, token)
except KeyError as e:
handle_task_error(e, self.task, error_text='while sending token')
def add_note(self, note):
"""
Logs the note, and also adds it to the task notes.
"""
now = timezone.now()
self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.
task.uuid, note))
note = '%s - (%s)' % (note, now)
self.task.add_task_note(note)
<|reserved_special_token_0|>
def is_valid(self, internal_message=None):
self._refresh_actions()
valid = all([act.valid for act in self.actions])
if not valid:
raise exceptions.TaskActionsInvalid(self.task,
'actions invalid', internal_message)
@property
def approved(self):
return self.task.approved
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def confirm_state(self, approved=None, completed=None, cancelled=None):
"""Check that the Task is in a given state.
None value means state is ignored. Otherwise expects true or false.
"""
if completed is not None:
if self.task.completed and not completed:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been completed.')
if not self.task.completed and completed:
raise exceptions.TaskStateInvalid(self.task,
"This task hasn't been completed.")
if cancelled is not None:
if self.task.cancelled and not cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has been cancelled.')
if not self.task.cancelled and cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been cancelled.')
if approved is not None:
if self.task.approved and not approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been approved.')
if not self.task.approved and approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been approved.')
def update(self, action_data):
self.confirm_state(approved=False, completed=False, cancelled=False)
action_serializer_list = self._instantiate_action_serializers(
action_data, use_existing_actions=True)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
for action in action_serializer_list:
data = action['serializer'].validated_data
action['action'].action.action_data = data
action['action'].action.save()
self._refresh_actions()
self.prepare()
<|reserved_special_token_0|>
def approve(self, approved_by='system'):
"""Run the approve stage for all the actions."""
self.confirm_state(completed=False, cancelled=False)
self.is_valid('task invalid before approval')
self.task.approved = True
self.task.approved_on = timezone.now()
self.task.approved_by = approved_by
self.task.save()
for action in self.actions:
try:
action.approve()
except Exception as e:
handle_task_error(e, self.task, error_text=
'while approving task')
self.is_valid('task invalid after approval')
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
else:
self.submit()
def reissue_token(self):
self.confirm_state(approved=True, completed=False, cancelled=False)
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
<|reserved_special_token_0|>
def submit(self, token_data=None, keystone_user=None):
self.confirm_state(approved=True, completed=False, cancelled=False)
required_fields = set()
actions = []
for action in self.task.actions:
a = action.get_action()
actions.append(a)
for field in a.token_fields:
required_fields.add(field)
if not token_data:
token_data = {}
errors = {}
data = {}
for field in required_fields:
try:
data[field] = token_data[field]
except KeyError:
errors[field] = ['This field is required.']
except TypeError:
errors = [
'Improperly formated json. Should be a key-value object.']
break
if errors:
raise exceptions.TaskTokenSerializersInvalid(self.task, errors)
self.is_valid('task invalid before submit')
for action in actions:
try:
action.submit(data, keystone_user)
except Exception as e:
handle_task_error(e, self.task, 'while submiting task')
self.is_valid('task invalid after submit')
self.task.completed = True
self.task.completed_on = timezone.now()
self.task.save()
for token in self.task.tokens:
token.delete()
email_conf = self.config.emails.completed
send_stage_email(self.task, email_conf)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseTask(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, task_model=None, task_data=None, action_data=None):
self._config = None
self.logger = getLogger('adjutant')
if task_model:
self.task = task_model
self._refresh_actions()
else:
action_serializer_list = self._instantiate_action_serializers(
action_data)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
keystone_user = task_data.get('keystone_user', {})
self.task = Task.objects.create(keystone_user=keystone_user,
project_id=keystone_user.get('project_id'), task_type=self.
task_type, hash_key=hash_key)
self.task.save()
self.actions = []
for i, action in enumerate(action_serializer_list):
data = action['serializer'].validated_data
self.actions.append(action['action'](data=data, task=self.
task, order=i))
self.logger.info("(%s) - '%s' task created (%s)." % (timezone.
now(), self.task_type, self.task.uuid))
<|reserved_special_token_0|>
def _create_task_hash(self, action_list):
hashable_list = [self.task_type]
for action in action_list:
hashable_list.append(action['name'])
if not action['serializer']:
continue
fields = sorted(action['serializer'].validated_data.keys())
for field in fields:
try:
hashable_list.append(action['serializer'].
validated_data[field])
except KeyError:
if field == 'username' and CONF.identity.username_is_email:
continue
else:
raise
return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()
def _handle_duplicates(self, hash_key):
duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=
0, cancelled=0)
if not duplicate_tasks:
return
if self.duplicate_policy == 'cancel':
now = timezone.now()
self.logger.info(
'(%s) - Task is a duplicate - Cancelling old tasks.' % now)
for task in duplicate_tasks:
task.add_task_note(
'Task cancelled because was an old duplicate. - (%s)' % now
)
task.get_task().cancel()
return
raise exceptions.TaskDuplicateFound()
def _refresh_actions(self):
self.actions = [a.get_action() for a in self.task.actions]
def _create_token(self):
self.clear_tokens()
token_expiry = self.config.token_expiry or self.token_expiry
token = create_token(self.task, token_expiry)
self.add_note('Token created for task.')
try:
email_conf = self.config.emails.token
send_stage_email(self.task, email_conf, token)
except KeyError as e:
handle_task_error(e, self.task, error_text='while sending token')
def add_note(self, note):
"""
Logs the note, and also adds it to the task notes.
"""
now = timezone.now()
self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.
task.uuid, note))
note = '%s - (%s)' % (note, now)
self.task.add_task_note(note)
@property
def config(self):
"""Get my config.
Returns a dict of the config for this task.
"""
if self._config is None:
try:
task_conf = CONF.workflow.tasks[self.task_type]
except KeyError:
task_conf = {}
self._config = CONF.workflow.task_defaults.overlay(task_conf)
return self._config
def is_valid(self, internal_message=None):
self._refresh_actions()
valid = all([act.valid for act in self.actions])
if not valid:
raise exceptions.TaskActionsInvalid(self.task,
'actions invalid', internal_message)
@property
def approved(self):
return self.task.approved
<|reserved_special_token_0|>
@property
def cancelled(self):
return self.task.cancelled
def confirm_state(self, approved=None, completed=None, cancelled=None):
"""Check that the Task is in a given state.
None value means state is ignored. Otherwise expects true or false.
"""
if completed is not None:
if self.task.completed and not completed:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been completed.')
if not self.task.completed and completed:
raise exceptions.TaskStateInvalid(self.task,
"This task hasn't been completed.")
if cancelled is not None:
if self.task.cancelled and not cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has been cancelled.')
if not self.task.cancelled and cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been cancelled.')
if approved is not None:
if self.task.approved and not approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been approved.')
if not self.task.approved and approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been approved.')
def update(self, action_data):
self.confirm_state(approved=False, completed=False, cancelled=False)
action_serializer_list = self._instantiate_action_serializers(
action_data, use_existing_actions=True)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
for action in action_serializer_list:
data = action['serializer'].validated_data
action['action'].action.action_data = data
action['action'].action.save()
self._refresh_actions()
self.prepare()
<|reserved_special_token_0|>
def approve(self, approved_by='system'):
"""Run the approve stage for all the actions."""
self.confirm_state(completed=False, cancelled=False)
self.is_valid('task invalid before approval')
self.task.approved = True
self.task.approved_on = timezone.now()
self.task.approved_by = approved_by
self.task.save()
for action in self.actions:
try:
action.approve()
except Exception as e:
handle_task_error(e, self.task, error_text=
'while approving task')
self.is_valid('task invalid after approval')
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
else:
self.submit()
def reissue_token(self):
self.confirm_state(approved=True, completed=False, cancelled=False)
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
def clear_tokens(self):
for token in self.task.tokens:
token.delete()
def submit(self, token_data=None, keystone_user=None):
self.confirm_state(approved=True, completed=False, cancelled=False)
required_fields = set()
actions = []
for action in self.task.actions:
a = action.get_action()
actions.append(a)
for field in a.token_fields:
required_fields.add(field)
if not token_data:
token_data = {}
errors = {}
data = {}
for field in required_fields:
try:
data[field] = token_data[field]
except KeyError:
errors[field] = ['This field is required.']
except TypeError:
errors = [
'Improperly formated json. Should be a key-value object.']
break
if errors:
raise exceptions.TaskTokenSerializersInvalid(self.task, errors)
self.is_valid('task invalid before submit')
for action in actions:
try:
action.submit(data, keystone_user)
except Exception as e:
handle_task_error(e, self.task, 'while submiting task')
self.is_valid('task invalid after submit')
self.task.completed = True
self.task.completed_on = timezone.now()
self.task.save()
for token in self.task.tokens:
token.delete()
email_conf = self.config.emails.completed
send_stage_email(self.task, email_conf)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseTask(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, task_model=None, task_data=None, action_data=None):
self._config = None
self.logger = getLogger('adjutant')
if task_model:
self.task = task_model
self._refresh_actions()
else:
action_serializer_list = self._instantiate_action_serializers(
action_data)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
keystone_user = task_data.get('keystone_user', {})
self.task = Task.objects.create(keystone_user=keystone_user,
project_id=keystone_user.get('project_id'), task_type=self.
task_type, hash_key=hash_key)
self.task.save()
self.actions = []
for i, action in enumerate(action_serializer_list):
data = action['serializer'].validated_data
self.actions.append(action['action'](data=data, task=self.
task, order=i))
self.logger.info("(%s) - '%s' task created (%s)." % (timezone.
now(), self.task_type, self.task.uuid))
<|reserved_special_token_0|>
def _create_task_hash(self, action_list):
hashable_list = [self.task_type]
for action in action_list:
hashable_list.append(action['name'])
if not action['serializer']:
continue
fields = sorted(action['serializer'].validated_data.keys())
for field in fields:
try:
hashable_list.append(action['serializer'].
validated_data[field])
except KeyError:
if field == 'username' and CONF.identity.username_is_email:
continue
else:
raise
return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()
def _handle_duplicates(self, hash_key):
duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=
0, cancelled=0)
if not duplicate_tasks:
return
if self.duplicate_policy == 'cancel':
now = timezone.now()
self.logger.info(
'(%s) - Task is a duplicate - Cancelling old tasks.' % now)
for task in duplicate_tasks:
task.add_task_note(
'Task cancelled because was an old duplicate. - (%s)' % now
)
task.get_task().cancel()
return
raise exceptions.TaskDuplicateFound()
def _refresh_actions(self):
self.actions = [a.get_action() for a in self.task.actions]
def _create_token(self):
self.clear_tokens()
token_expiry = self.config.token_expiry or self.token_expiry
token = create_token(self.task, token_expiry)
self.add_note('Token created for task.')
try:
email_conf = self.config.emails.token
send_stage_email(self.task, email_conf, token)
except KeyError as e:
handle_task_error(e, self.task, error_text='while sending token')
def add_note(self, note):
"""
Logs the note, and also adds it to the task notes.
"""
now = timezone.now()
self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.
task.uuid, note))
note = '%s - (%s)' % (note, now)
self.task.add_task_note(note)
@property
def config(self):
"""Get my config.
Returns a dict of the config for this task.
"""
if self._config is None:
try:
task_conf = CONF.workflow.tasks[self.task_type]
except KeyError:
task_conf = {}
self._config = CONF.workflow.task_defaults.overlay(task_conf)
return self._config
def is_valid(self, internal_message=None):
self._refresh_actions()
valid = all([act.valid for act in self.actions])
if not valid:
raise exceptions.TaskActionsInvalid(self.task,
'actions invalid', internal_message)
@property
def approved(self):
return self.task.approved
@property
def completed(self):
return self.task.completed
@property
def cancelled(self):
return self.task.cancelled
def confirm_state(self, approved=None, completed=None, cancelled=None):
"""Check that the Task is in a given state.
None value means state is ignored. Otherwise expects true or false.
"""
if completed is not None:
if self.task.completed and not completed:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been completed.')
if not self.task.completed and completed:
raise exceptions.TaskStateInvalid(self.task,
"This task hasn't been completed.")
if cancelled is not None:
if self.task.cancelled and not cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has been cancelled.')
if not self.task.cancelled and cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been cancelled.')
if approved is not None:
if self.task.approved and not approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been approved.')
if not self.task.approved and approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been approved.')
def update(self, action_data):
self.confirm_state(approved=False, completed=False, cancelled=False)
action_serializer_list = self._instantiate_action_serializers(
action_data, use_existing_actions=True)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
for action in action_serializer_list:
data = action['serializer'].validated_data
action['action'].action.action_data = data
action['action'].action.save()
self._refresh_actions()
self.prepare()
<|reserved_special_token_0|>
def approve(self, approved_by='system'):
"""Run the approve stage for all the actions."""
self.confirm_state(completed=False, cancelled=False)
self.is_valid('task invalid before approval')
self.task.approved = True
self.task.approved_on = timezone.now()
self.task.approved_by = approved_by
self.task.save()
for action in self.actions:
try:
action.approve()
except Exception as e:
handle_task_error(e, self.task, error_text=
'while approving task')
self.is_valid('task invalid after approval')
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
else:
self.submit()
def reissue_token(self):
self.confirm_state(approved=True, completed=False, cancelled=False)
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
def clear_tokens(self):
for token in self.task.tokens:
token.delete()
def submit(self, token_data=None, keystone_user=None):
self.confirm_state(approved=True, completed=False, cancelled=False)
required_fields = set()
actions = []
for action in self.task.actions:
a = action.get_action()
actions.append(a)
for field in a.token_fields:
required_fields.add(field)
if not token_data:
token_data = {}
errors = {}
data = {}
for field in required_fields:
try:
data[field] = token_data[field]
except KeyError:
errors[field] = ['This field is required.']
except TypeError:
errors = [
'Improperly formated json. Should be a key-value object.']
break
if errors:
raise exceptions.TaskTokenSerializersInvalid(self.task, errors)
self.is_valid('task invalid before submit')
for action in actions:
try:
action.submit(data, keystone_user)
except Exception as e:
handle_task_error(e, self.task, 'while submiting task')
self.is_valid('task invalid after submit')
self.task.completed = True
self.task.completed_on = timezone.now()
self.task.save()
for token in self.task.tokens:
token.delete()
email_conf = self.config.emails.completed
send_stage_email(self.task, email_conf)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseTask(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, task_model=None, task_data=None, action_data=None):
self._config = None
self.logger = getLogger('adjutant')
if task_model:
self.task = task_model
self._refresh_actions()
else:
action_serializer_list = self._instantiate_action_serializers(
action_data)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
keystone_user = task_data.get('keystone_user', {})
self.task = Task.objects.create(keystone_user=keystone_user,
project_id=keystone_user.get('project_id'), task_type=self.
task_type, hash_key=hash_key)
self.task.save()
self.actions = []
for i, action in enumerate(action_serializer_list):
data = action['serializer'].validated_data
self.actions.append(action['action'](data=data, task=self.
task, order=i))
self.logger.info("(%s) - '%s' task created (%s)." % (timezone.
now(), self.task_type, self.task.uuid))
def _instantiate_action_serializers(self, action_data,
use_existing_actions=False):
action_serializer_list = []
if use_existing_actions:
actions = self.actions
else:
actions = self.default_actions[:]
actions += self.config.additional_actions
valid = True
for action in actions:
if use_existing_actions:
action_name = action.action.action_name
else:
action_name = action
action_class = adj_actions.ACTION_CLASSES[action_name]
if use_existing_actions:
action_class = action
if not action_class.serializer:
raise exceptions.SerializerMissingException(
'No serializer defined for action %s' % action_name)
serializer = action_class.serializer(data=action_data)
action_serializer_list.append({'name': action_name, 'action':
action_class, 'serializer': serializer})
if serializer and not serializer.is_valid():
valid = False
if not valid:
errors = {}
for action in action_serializer_list:
if action['serializer']:
errors.update(action['serializer'].errors)
raise exceptions.TaskSerializersInvalid(errors)
return action_serializer_list
def _create_task_hash(self, action_list):
hashable_list = [self.task_type]
for action in action_list:
hashable_list.append(action['name'])
if not action['serializer']:
continue
fields = sorted(action['serializer'].validated_data.keys())
for field in fields:
try:
hashable_list.append(action['serializer'].
validated_data[field])
except KeyError:
if field == 'username' and CONF.identity.username_is_email:
continue
else:
raise
return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()
def _handle_duplicates(self, hash_key):
duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=
0, cancelled=0)
if not duplicate_tasks:
return
if self.duplicate_policy == 'cancel':
now = timezone.now()
self.logger.info(
'(%s) - Task is a duplicate - Cancelling old tasks.' % now)
for task in duplicate_tasks:
task.add_task_note(
'Task cancelled because was an old duplicate. - (%s)' % now
)
task.get_task().cancel()
return
raise exceptions.TaskDuplicateFound()
def _refresh_actions(self):
self.actions = [a.get_action() for a in self.task.actions]
def _create_token(self):
self.clear_tokens()
token_expiry = self.config.token_expiry or self.token_expiry
token = create_token(self.task, token_expiry)
self.add_note('Token created for task.')
try:
email_conf = self.config.emails.token
send_stage_email(self.task, email_conf, token)
except KeyError as e:
handle_task_error(e, self.task, error_text='while sending token')
def add_note(self, note):
"""
Logs the note, and also adds it to the task notes.
"""
now = timezone.now()
self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.
task.uuid, note))
note = '%s - (%s)' % (note, now)
self.task.add_task_note(note)
@property
def config(self):
"""Get my config.
Returns a dict of the config for this task.
"""
if self._config is None:
try:
task_conf = CONF.workflow.tasks[self.task_type]
except KeyError:
task_conf = {}
self._config = CONF.workflow.task_defaults.overlay(task_conf)
return self._config
def is_valid(self, internal_message=None):
self._refresh_actions()
valid = all([act.valid for act in self.actions])
if not valid:
raise exceptions.TaskActionsInvalid(self.task,
'actions invalid', internal_message)
@property
def approved(self):
return self.task.approved
@property
def completed(self):
return self.task.completed
@property
def cancelled(self):
return self.task.cancelled
def confirm_state(self, approved=None, completed=None, cancelled=None):
"""Check that the Task is in a given state.
None value means state is ignored. Otherwise expects true or false.
"""
if completed is not None:
if self.task.completed and not completed:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been completed.')
if not self.task.completed and completed:
raise exceptions.TaskStateInvalid(self.task,
"This task hasn't been completed.")
if cancelled is not None:
if self.task.cancelled and not cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has been cancelled.')
if not self.task.cancelled and cancelled:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been cancelled.')
if approved is not None:
if self.task.approved and not approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has already been approved.')
if not self.task.approved and approved:
raise exceptions.TaskStateInvalid(self.task,
'This task has not been approved.')
def update(self, action_data):
self.confirm_state(approved=False, completed=False, cancelled=False)
action_serializer_list = self._instantiate_action_serializers(
action_data, use_existing_actions=True)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
for action in action_serializer_list:
data = action['serializer'].validated_data
action['action'].action.action_data = data
action['action'].action.save()
self._refresh_actions()
self.prepare()
def prepare(self):
"""Run the prepare stage for all the actions.
If the task can be auto approved, this will also run the approve
stage.
"""
self.confirm_state(approved=False, completed=False, cancelled=False)
for action in self.actions:
try:
action.prepare()
except Exception as e:
handle_task_error(e, self.task, error_text=
'while setting up task')
email_conf = self.config.emails.initial
send_stage_email(self.task, email_conf)
approve_list = [act.auto_approve for act in self.actions]
if False in approve_list:
can_auto_approve = False
elif True in approve_list:
can_auto_approve = True
else:
can_auto_approve = False
if self.config.allow_auto_approve is not None:
allow_auto_approve = self.config.allow_auto_approve
else:
allow_auto_approve = self.allow_auto_approve
if can_auto_approve and not allow_auto_approve:
self.add_note('Actions allow auto aproval, but task does not.')
elif can_auto_approve:
self.add_note('Action allow auto approval. Auto approving.')
self.approve()
return
if self.send_approval_notification:
notes = {'notes': ["'%s' task needs approval." % self.task_type]}
create_notification(self.task, notes)
def approve(self, approved_by='system'):
"""Run the approve stage for all the actions."""
self.confirm_state(completed=False, cancelled=False)
self.is_valid('task invalid before approval')
self.task.approved = True
self.task.approved_on = timezone.now()
self.task.approved_by = approved_by
self.task.save()
for action in self.actions:
try:
action.approve()
except Exception as e:
handle_task_error(e, self.task, error_text=
'while approving task')
self.is_valid('task invalid after approval')
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
else:
self.submit()
def reissue_token(self):
self.confirm_state(approved=True, completed=False, cancelled=False)
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
def clear_tokens(self):
for token in self.task.tokens:
token.delete()
def submit(self, token_data=None, keystone_user=None):
self.confirm_state(approved=True, completed=False, cancelled=False)
required_fields = set()
actions = []
for action in self.task.actions:
a = action.get_action()
actions.append(a)
for field in a.token_fields:
required_fields.add(field)
if not token_data:
token_data = {}
errors = {}
data = {}
for field in required_fields:
try:
data[field] = token_data[field]
except KeyError:
errors[field] = ['This field is required.']
except TypeError:
errors = [
'Improperly formated json. Should be a key-value object.']
break
if errors:
raise exceptions.TaskTokenSerializersInvalid(self.task, errors)
self.is_valid('task invalid before submit')
for action in actions:
try:
action.submit(data, keystone_user)
except Exception as e:
handle_task_error(e, self.task, 'while submiting task')
self.is_valid('task invalid after submit')
self.task.completed = True
self.task.completed_on = timezone.now()
self.task.save()
for token in self.task.tokens:
token.delete()
email_conf = self.config.emails.completed
send_stage_email(self.task, email_conf)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# Copyright (C) 2019 Catalyst Cloud Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from logging import getLogger
from confspirator import groups
from confspirator import fields
from adjutant import actions as adj_actions
from adjutant.api.models import Task
from adjutant.config import CONF
from django.utils import timezone
from adjutant.notifications.utils import create_notification
from adjutant.tasks.v1.utils import send_stage_email, create_token, handle_task_error
from adjutant import exceptions
def make_task_config(task_class):
config_group = groups.DynamicNameConfigGroup()
config_group.register_child_config(
fields.BoolConfig(
"allow_auto_approve",
help_text="Override if this task allows auto_approval. "
"Otherwise uses task default.",
default=task_class.allow_auto_approve,
)
)
config_group.register_child_config(
fields.ListConfig(
"additional_actions",
help_text="Additional actions to be run as part of the task "
"after default actions.",
default=task_class.additional_actions or [],
)
)
config_group.register_child_config(
fields.IntConfig(
"token_expiry",
help_text="Override for the task token expiry. "
"Otherwise uses task default.",
default=task_class.token_expiry,
)
)
config_group.register_child_config(
fields.DictConfig(
"actions",
help_text="Action config overrides over the action defaults. "
"See 'adjutant.workflow.action_defaults'.",
is_json=True,
default=task_class.action_config or {},
sample_default={
"SomeCustomAction": {"some_action_setting": "<a-uuid-probably>"}
},
)
)
config_group.register_child_config(
fields.DictConfig(
"emails",
help_text="Email config overrides for this task over task defaults."
"See 'adjutant.workflow.emails'.",
is_json=True,
default=task_class.email_config or {},
sample_default={
"initial": None,
"token": {
"subject": "Some custom subject",
},
},
)
)
config_group.register_child_config(
fields.DictConfig(
"notifications",
help_text="Notification config overrides for this task over task defaults."
"See 'adjutant.workflow.notifications'.",
is_json=True,
default=task_class.notification_config or {},
sample_default={
"standard_handlers": ["EmailNotification"],
"error_handlers": ["EmailNotification"],
"standard_handler_config": {
"EmailNotification": {
"emails": ["[email protected]"],
"reply": "[email protected]",
}
},
"error_handler_config": {
"EmailNotification": {
"emails": ["[email protected]"],
"reply": "[email protected]",
}
},
},
)
)
return config_group
class BaseTask(object):
"""
Base class for in memory task representation.
This serves as the internal task logic handler, and is used to
define what a task looks like.
Most of the time this class shouldn't be called or used directly
as the task manager is what handles the direct interaction to the
logic here, and includes some wrapper logic to help deal with workflows.
"""
# required values in custom task
task_type = None
default_actions = None
# default values to optionally override in task definition
deprecated_task_types = None
duplicate_policy = "cancel"
send_approval_notification = True
token_requires_authentication = False
# config defaults for the task (used to generate default config):
allow_auto_approve = True
additional_actions = None
token_expiry = None
action_config = None
email_config = None
notification_config = None
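    # A concrete task usually only overrides the class attributes above, for
    # example (hypothetical sketch, not a real task definition):
    #
    #     class InviteUser(BaseTask):
    #         task_type = "invite_user"
    #         default_actions = ["NewUserAction"]
    #         token_expiry = 86400
    #
    # The workflow methods below are shared by all task types.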
def __init__(self, task_model=None, task_data=None, action_data=None):
self._config = None
self.logger = getLogger("adjutant")
if task_model:
self.task = task_model
self._refresh_actions()
else:
# raises 400 validation error
action_serializer_list = self._instantiate_action_serializers(action_data)
hash_key = self._create_task_hash(action_serializer_list)
# raises duplicate error
self._handle_duplicates(hash_key)
keystone_user = task_data.get("keystone_user", {})
self.task = Task.objects.create(
keystone_user=keystone_user,
project_id=keystone_user.get("project_id"),
task_type=self.task_type,
hash_key=hash_key,
)
self.task.save()
# Instantiate actions with serializers
self.actions = []
for i, action in enumerate(action_serializer_list):
data = action["serializer"].validated_data
# construct the action class
self.actions.append(
action["action"](data=data, task=self.task, order=i)
)
self.logger.info(
"(%s) - '%s' task created (%s)."
% (timezone.now(), self.task_type, self.task.uuid)
)
def _instantiate_action_serializers(self, action_data, use_existing_actions=False):
action_serializer_list = []
if use_existing_actions:
actions = self.actions
else:
actions = self.default_actions[:]
actions += self.config.additional_actions
# instantiate all action serializers and check validity
valid = True
for action in actions:
if use_existing_actions:
action_name = action.action.action_name
else:
action_name = action
action_class = adj_actions.ACTION_CLASSES[action_name]
if use_existing_actions:
action_class = action
# instantiate serializer class
if not action_class.serializer:
raise exceptions.SerializerMissingException(
"No serializer defined for action %s" % action_name
)
serializer = action_class.serializer(data=action_data)
action_serializer_list.append(
{"name": action_name, "action": action_class, "serializer": serializer}
)
if serializer and not serializer.is_valid():
valid = False
if not valid:
errors = {}
for action in action_serializer_list:
if action["serializer"]:
errors.update(action["serializer"].errors)
raise exceptions.TaskSerializersInvalid(errors)
return action_serializer_list
def _create_task_hash(self, action_list):
hashable_list = [
self.task_type,
]
for action in action_list:
hashable_list.append(action["name"])
if not action["serializer"]:
continue
# iterate like this to maintain consistent order for hash
fields = sorted(action["serializer"].validated_data.keys())
for field in fields:
try:
hashable_list.append(action["serializer"].validated_data[field])
except KeyError:
if field == "username" and CONF.identity.username_is_email:
continue
else:
raise
return hashlib.sha256(str(hashable_list).encode("utf-8")).hexdigest()
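    # For a hypothetical single-action task the hashed list looks roughly
    # like ["example_task", "SomeAction", <validated field values sorted by
    # field name>], so two submissions with identical data produce the same
    # hash_key (names here are illustrative only).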
def _handle_duplicates(self, hash_key):
duplicate_tasks = Task.objects.filter(
hash_key=hash_key, completed=0, cancelled=0
)
if not duplicate_tasks:
return
if self.duplicate_policy == "cancel":
now = timezone.now()
self.logger.info("(%s) - Task is a duplicate - Cancelling old tasks." % now)
for task in duplicate_tasks:
task.add_task_note(
"Task cancelled because was an old duplicate. - (%s)" % now
)
task.get_task().cancel()
return
raise exceptions.TaskDuplicateFound()
def _refresh_actions(self):
self.actions = [a.get_action() for a in self.task.actions]
def _create_token(self):
self.clear_tokens()
token_expiry = self.config.token_expiry or self.token_expiry
token = create_token(self.task, token_expiry)
self.add_note("Token created for task.")
try:
# will throw a key error if the token template has not
# been specified
email_conf = self.config.emails.token
send_stage_email(self.task, email_conf, token)
except KeyError as e:
handle_task_error(e, self.task, error_text="while sending token")
def add_note(self, note):
"""
Logs the note, and also adds it to the task notes.
"""
now = timezone.now()
self.logger.info(
"(%s)(%s)(%s) - %s" % (now, self.task_type, self.task.uuid, note)
)
note = "%s - (%s)" % (note, now)
self.task.add_task_note(note)
@property
def config(self):
"""Get my config.
Returns a dict of the config for this task.
"""
if self._config is None:
try:
task_conf = CONF.workflow.tasks[self.task_type]
except KeyError:
task_conf = {}
self._config = CONF.workflow.task_defaults.overlay(task_conf)
return self._config
def is_valid(self, internal_message=None):
self._refresh_actions()
valid = all([act.valid for act in self.actions])
if not valid:
# TODO(amelia): get action invalidation reasons and raise those
raise exceptions.TaskActionsInvalid(
self.task, "actions invalid", internal_message
)
@property
def approved(self):
return self.task.approved
@property
def completed(self):
return self.task.completed
@property
def cancelled(self):
return self.task.cancelled
def confirm_state(self, approved=None, completed=None, cancelled=None):
"""Check that the Task is in a given state.
None value means state is ignored. Otherwise expects true or false.
"""
if completed is not None:
if self.task.completed and not completed:
raise exceptions.TaskStateInvalid(
self.task, "This task has already been completed."
)
if not self.task.completed and completed:
raise exceptions.TaskStateInvalid(
self.task, "This task hasn't been completed."
)
if cancelled is not None:
if self.task.cancelled and not cancelled:
raise exceptions.TaskStateInvalid(
self.task, "This task has been cancelled."
)
if not self.task.cancelled and cancelled:
raise exceptions.TaskStateInvalid(
self.task, "This task has not been cancelled."
)
if approved is not None:
if self.task.approved and not approved:
raise exceptions.TaskStateInvalid(
self.task, "This task has already been approved."
)
if not self.task.approved and approved:
raise exceptions.TaskStateInvalid(
self.task, "This task has not been approved."
)
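    # For example, approve() below calls
    # confirm_state(completed=False, cancelled=False): the approval flag is
    # ignored, while a task that is already completed or cancelled raises
    # TaskStateInvalid before any actions are run.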
def update(self, action_data):
self.confirm_state(approved=False, completed=False, cancelled=False)
action_serializer_list = self._instantiate_action_serializers(
action_data, use_existing_actions=True
)
hash_key = self._create_task_hash(action_serializer_list)
self._handle_duplicates(hash_key)
for action in action_serializer_list:
data = action["serializer"].validated_data
action["action"].action.action_data = data
action["action"].action.save()
self._refresh_actions()
self.prepare()
def prepare(self):
"""Run the prepare stage for all the actions.
If the task can be auto approved, this will also run the approve
stage.
"""
self.confirm_state(approved=False, completed=False, cancelled=False)
for action in self.actions:
try:
action.prepare()
except Exception as e:
handle_task_error(e, self.task, error_text="while setting up task")
# send initial confirmation email:
email_conf = self.config.emails.initial
send_stage_email(self.task, email_conf)
approve_list = [act.auto_approve for act in self.actions]
# TODO(amelia): It would be nice to explicitly test this, however
# currently we don't have the right combinations of
# actions to allow for it.
if False in approve_list:
can_auto_approve = False
elif True in approve_list:
can_auto_approve = True
else:
can_auto_approve = False
if self.config.allow_auto_approve is not None:
allow_auto_approve = self.config.allow_auto_approve
else:
allow_auto_approve = self.allow_auto_approve
if can_auto_approve and not allow_auto_approve:
self.add_note("Actions allow auto aproval, but task does not.")
elif can_auto_approve:
self.add_note("Action allow auto approval. Auto approving.")
self.approve()
return
if self.send_approval_notification:
notes = {"notes": ["'%s' task needs approval." % self.task_type]}
create_notification(self.task, notes)
def approve(self, approved_by="system"):
"""Run the approve stage for all the actions."""
self.confirm_state(completed=False, cancelled=False)
self.is_valid("task invalid before approval")
# We approve the task before running actions,
# that way if something goes wrong we know if it was approved,
# when it was approved, and who approved it.
self.task.approved = True
self.task.approved_on = timezone.now()
self.task.approved_by = approved_by
self.task.save()
# approve all actions
for action in self.actions:
try:
action.approve()
except Exception as e:
handle_task_error(e, self.task, error_text="while approving task")
self.is_valid("task invalid after approval")
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
else:
self.submit()
def reissue_token(self):
self.confirm_state(approved=True, completed=False, cancelled=False)
need_token = any([act.need_token for act in self.actions])
if need_token:
self._create_token()
def clear_tokens(self):
for token in self.task.tokens:
token.delete()
def submit(self, token_data=None, keystone_user=None):
self.confirm_state(approved=True, completed=False, cancelled=False)
required_fields = set()
actions = []
for action in self.task.actions:
a = action.get_action()
actions.append(a)
for field in a.token_fields:
required_fields.add(field)
if not token_data:
token_data = {}
errors = {}
data = {}
for field in required_fields:
try:
data[field] = token_data[field]
except KeyError:
errors[field] = [
"This field is required.",
]
except TypeError:
errors = ["Improperly formated json. " "Should be a key-value object."]
break
if errors:
raise exceptions.TaskTokenSerializersInvalid(self.task, errors)
self.is_valid("task invalid before submit")
for action in actions:
try:
action.submit(data, keystone_user)
except Exception as e:
handle_task_error(e, self.task, "while submiting task")
self.is_valid("task invalid after submit")
self.task.completed = True
self.task.completed_on = timezone.now()
self.task.save()
for token in self.task.tokens:
token.delete()
# Sending confirmation email:
email_conf = self.config.emails.completed
send_stage_email(self.task, email_conf)
def cancel(self):
self.confirm_state(completed=False, cancelled=False)
self.clear_tokens()
self.task.cancelled = True
self.task.save()
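# Typical lifecycle as implemented above (sketch only; in practice the task
# manager drives these calls, and the names below are illustrative):
#
#     task = SomeTaskClass(task_data=task_data, action_data=action_data)
#     task.prepare()                      # may auto-approve and submit
#     task.approve(approved_by="admin")   # if not auto-approved
#     task.submit(token_data={...})       # only when a token was issued
#
# The required token_data fields depend on each action's token_fields.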
|
flexible
|
{
"blob_id": "cc23eeed44ff66d68c700163cca8b9f4986d497d",
"index": 7681,
"step-1": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. - (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n <mask token>\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n <mask token>\n <mask token>\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. 
Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n <mask token>\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n <mask token>\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except 
Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n <mask token>\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n <mask token>\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in 
self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass BaseTask(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger('adjutant')\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n action_serializer_list = self._instantiate_action_serializers(\n action_data)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n keystone_user = task_data.get('keystone_user', {})\n self.task = Task.objects.create(keystone_user=keystone_user,\n project_id=keystone_user.get('project_id'), task_type=self.\n task_type, hash_key=hash_key)\n self.task.save()\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action['serializer'].validated_data\n self.actions.append(action['action'](data=data, task=self.\n task, order=i))\n self.logger.info(\"(%s) - '%s' task created (%s).\" % (timezone.\n now(), self.task_type, self.task.uuid))\n\n def _instantiate_action_serializers(self, action_data,\n use_existing_actions=False):\n action_serializer_list = []\n if use_existing_actions:\n actions = self.actions\n else:\n actions = self.default_actions[:]\n actions += self.config.additional_actions\n valid = True\n for action in actions:\n if use_existing_actions:\n action_name = action.action.action_name\n else:\n action_name = action\n action_class = adj_actions.ACTION_CLASSES[action_name]\n if use_existing_actions:\n action_class = action\n if not action_class.serializer:\n raise exceptions.SerializerMissingException(\n 'No serializer defined for action %s' % action_name)\n serializer = action_class.serializer(data=action_data)\n action_serializer_list.append({'name': action_name, 'action':\n action_class, 'serializer': serializer})\n if serializer and not serializer.is_valid():\n valid = False\n if not valid:\n errors = {}\n for action in action_serializer_list:\n if action['serializer']:\n errors.update(action['serializer'].errors)\n raise exceptions.TaskSerializersInvalid(errors)\n return action_serializer_list\n\n def _create_task_hash(self, action_list):\n hashable_list = [self.task_type]\n for action in action_list:\n hashable_list.append(action['name'])\n if not action['serializer']:\n continue\n fields = sorted(action['serializer'].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action['serializer'].\n validated_data[field])\n except KeyError:\n if field == 'username' and CONF.identity.username_is_email:\n continue\n else:\n raise\n return hashlib.sha256(str(hashable_list).encode('utf-8')).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(hash_key=hash_key, completed=\n 0, cancelled=0)\n if not duplicate_tasks:\n return\n if self.duplicate_policy == 'cancel':\n now = timezone.now()\n self.logger.info(\n '(%s) - Task is a duplicate - Cancelling old tasks.' % now)\n for task in duplicate_tasks:\n task.add_task_note(\n 'Task cancelled because was an old duplicate. 
- (%s)' % now\n )\n task.get_task().cancel()\n return\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note('Token created for task.')\n try:\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text='while sending token')\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info('(%s)(%s)(%s) - %s' % (now, self.task_type, self.\n task.uuid, note))\n note = '%s - (%s)' % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n raise exceptions.TaskActionsInvalid(self.task,\n 'actions invalid', internal_message)\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been completed.')\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(self.task,\n \"This task hasn't been completed.\")\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has been cancelled.')\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been cancelled.')\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has already been approved.')\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(self.task,\n 'This task has not been approved.')\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True)\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n for action in action_serializer_list:\n data = action['serializer'].validated_data\n action['action'].action.action_data = data\n action['action'].action.save()\n self._refresh_actions()\n self.prepare()\n\n def prepare(self):\n \"\"\"Run the prepare stage for all the actions.\n\n If the task can be auto approved, this will also run the approve\n stage.\n \"\"\"\n self.confirm_state(approved=False, completed=False, cancelled=False)\n for action in self.actions:\n try:\n action.prepare()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while 
setting up task')\n email_conf = self.config.emails.initial\n send_stage_email(self.task, email_conf)\n approve_list = [act.auto_approve for act in self.actions]\n if False in approve_list:\n can_auto_approve = False\n elif True in approve_list:\n can_auto_approve = True\n else:\n can_auto_approve = False\n if self.config.allow_auto_approve is not None:\n allow_auto_approve = self.config.allow_auto_approve\n else:\n allow_auto_approve = self.allow_auto_approve\n if can_auto_approve and not allow_auto_approve:\n self.add_note('Actions allow auto aproval, but task does not.')\n elif can_auto_approve:\n self.add_note('Action allow auto approval. Auto approving.')\n self.approve()\n return\n if self.send_approval_notification:\n notes = {'notes': [\"'%s' task needs approval.\" % self.task_type]}\n create_notification(self.task, notes)\n\n def approve(self, approved_by='system'):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n self.confirm_state(completed=False, cancelled=False)\n self.is_valid('task invalid before approval')\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\n 'while approving task')\n self.is_valid('task invalid after approval')\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n if not token_data:\n token_data = {}\n errors = {}\n data = {}\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = ['This field is required.']\n except TypeError:\n errors = [\n 'Improperly formated json. Should be a key-value object.']\n break\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n self.is_valid('task invalid before submit')\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, 'while submiting task')\n self.is_valid('task invalid after submit')\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n <mask token>\n",
"step-5": "# Copyright (C) 2019 Catalyst Cloud Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport hashlib\nfrom logging import getLogger\n\nfrom confspirator import groups\nfrom confspirator import fields\n\nfrom adjutant import actions as adj_actions\nfrom adjutant.api.models import Task\nfrom adjutant.config import CONF\nfrom django.utils import timezone\nfrom adjutant.notifications.utils import create_notification\nfrom adjutant.tasks.v1.utils import send_stage_email, create_token, handle_task_error\nfrom adjutant import exceptions\n\n\ndef make_task_config(task_class):\n config_group = groups.DynamicNameConfigGroup()\n config_group.register_child_config(\n fields.BoolConfig(\n \"allow_auto_approve\",\n help_text=\"Override if this task allows auto_approval. \"\n \"Otherwise uses task default.\",\n default=task_class.allow_auto_approve,\n )\n )\n config_group.register_child_config(\n fields.ListConfig(\n \"additional_actions\",\n help_text=\"Additional actions to be run as part of the task \"\n \"after default actions.\",\n default=task_class.additional_actions or [],\n )\n )\n config_group.register_child_config(\n fields.IntConfig(\n \"token_expiry\",\n help_text=\"Override for the task token expiry. \"\n \"Otherwise uses task default.\",\n default=task_class.token_expiry,\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"actions\",\n help_text=\"Action config overrides over the action defaults. 
\"\n \"See 'adjutant.workflow.action_defaults'.\",\n is_json=True,\n default=task_class.action_config or {},\n sample_default={\n \"SomeCustomAction\": {\"some_action_setting\": \"<a-uuid-probably>\"}\n },\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"emails\",\n help_text=\"Email config overrides for this task over task defaults.\"\n \"See 'adjutant.workflow.emails'.\",\n is_json=True,\n default=task_class.email_config or {},\n sample_default={\n \"initial\": None,\n \"token\": {\n \"subject\": \"Some custom subject\",\n },\n },\n )\n )\n config_group.register_child_config(\n fields.DictConfig(\n \"notifications\",\n help_text=\"Notification config overrides for this task over task defaults.\"\n \"See 'adjutant.workflow.notifications'.\",\n is_json=True,\n default=task_class.notification_config or {},\n sample_default={\n \"standard_handlers\": [\"EmailNotification\"],\n \"error_handlers\": [\"EmailNotification\"],\n \"standard_handler_config\": {\n \"EmailNotification\": {\n \"emails\": [\"[email protected]\"],\n \"reply\": \"[email protected]\",\n }\n },\n \"error_handler_config\": {\n \"EmailNotification\": {\n \"emails\": [\"[email protected]\"],\n \"reply\": \"[email protected]\",\n }\n },\n },\n )\n )\n return config_group\n\n\nclass BaseTask(object):\n \"\"\"\n Base class for in memory task representation.\n\n This serves as the internal task logic handler, and is used to\n define what a task looks like.\n\n Most of the time this class shouldn't be called or used directly\n as the task manager is what handles the direct interaction to the\n logic here, and includes some wrapper logic to help deal with workflows.\n \"\"\"\n\n # required values in custom task\n task_type = None\n default_actions = None\n\n # default values to optionally override in task definition\n deprecated_task_types = None\n duplicate_policy = \"cancel\"\n send_approval_notification = True\n token_requires_authentication = False\n\n # config defaults for the task (used to generate default config):\n allow_auto_approve = True\n additional_actions = None\n token_expiry = None\n action_config = None\n email_config = None\n notification_config = None\n\n def __init__(self, task_model=None, task_data=None, action_data=None):\n self._config = None\n self.logger = getLogger(\"adjutant\")\n\n if task_model:\n self.task = task_model\n self._refresh_actions()\n else:\n # raises 400 validation error\n action_serializer_list = self._instantiate_action_serializers(action_data)\n\n hash_key = self._create_task_hash(action_serializer_list)\n # raises duplicate error\n self._handle_duplicates(hash_key)\n\n keystone_user = task_data.get(\"keystone_user\", {})\n self.task = Task.objects.create(\n keystone_user=keystone_user,\n project_id=keystone_user.get(\"project_id\"),\n task_type=self.task_type,\n hash_key=hash_key,\n )\n self.task.save()\n\n # Instantiate actions with serializers\n self.actions = []\n for i, action in enumerate(action_serializer_list):\n data = action[\"serializer\"].validated_data\n\n # construct the action class\n self.actions.append(\n action[\"action\"](data=data, task=self.task, order=i)\n )\n self.logger.info(\n \"(%s) - '%s' task created (%s).\"\n % (timezone.now(), self.task_type, self.task.uuid)\n )\n\n def _instantiate_action_serializers(self, action_data, use_existing_actions=False):\n action_serializer_list = []\n\n if use_existing_actions:\n actions = self.actions\n else:\n actions = self.default_actions[:]\n actions += self.config.additional_actions\n\n # instantiate 
all action serializers and check validity\n valid = True\n for action in actions:\n if use_existing_actions:\n action_name = action.action.action_name\n else:\n action_name = action\n\n action_class = adj_actions.ACTION_CLASSES[action_name]\n\n if use_existing_actions:\n action_class = action\n\n # instantiate serializer class\n if not action_class.serializer:\n raise exceptions.SerializerMissingException(\n \"No serializer defined for action %s\" % action_name\n )\n serializer = action_class.serializer(data=action_data)\n\n action_serializer_list.append(\n {\"name\": action_name, \"action\": action_class, \"serializer\": serializer}\n )\n\n if serializer and not serializer.is_valid():\n valid = False\n\n if not valid:\n errors = {}\n for action in action_serializer_list:\n if action[\"serializer\"]:\n errors.update(action[\"serializer\"].errors)\n raise exceptions.TaskSerializersInvalid(errors)\n\n return action_serializer_list\n\n def _create_task_hash(self, action_list):\n hashable_list = [\n self.task_type,\n ]\n\n for action in action_list:\n hashable_list.append(action[\"name\"])\n if not action[\"serializer\"]:\n continue\n # iterate like this to maintain consistent order for hash\n fields = sorted(action[\"serializer\"].validated_data.keys())\n for field in fields:\n try:\n hashable_list.append(action[\"serializer\"].validated_data[field])\n except KeyError:\n if field == \"username\" and CONF.identity.username_is_email:\n continue\n else:\n raise\n\n return hashlib.sha256(str(hashable_list).encode(\"utf-8\")).hexdigest()\n\n def _handle_duplicates(self, hash_key):\n duplicate_tasks = Task.objects.filter(\n hash_key=hash_key, completed=0, cancelled=0\n )\n\n if not duplicate_tasks:\n return\n\n if self.duplicate_policy == \"cancel\":\n now = timezone.now()\n self.logger.info(\"(%s) - Task is a duplicate - Cancelling old tasks.\" % now)\n for task in duplicate_tasks:\n task.add_task_note(\n \"Task cancelled because was an old duplicate. 
- (%s)\" % now\n )\n task.get_task().cancel()\n return\n\n raise exceptions.TaskDuplicateFound()\n\n def _refresh_actions(self):\n self.actions = [a.get_action() for a in self.task.actions]\n\n def _create_token(self):\n self.clear_tokens()\n token_expiry = self.config.token_expiry or self.token_expiry\n token = create_token(self.task, token_expiry)\n self.add_note(\"Token created for task.\")\n try:\n # will throw a key error if the token template has not\n # been specified\n email_conf = self.config.emails.token\n send_stage_email(self.task, email_conf, token)\n except KeyError as e:\n handle_task_error(e, self.task, error_text=\"while sending token\")\n\n def add_note(self, note):\n \"\"\"\n Logs the note, and also adds it to the task notes.\n \"\"\"\n now = timezone.now()\n self.logger.info(\n \"(%s)(%s)(%s) - %s\" % (now, self.task_type, self.task.uuid, note)\n )\n note = \"%s - (%s)\" % (note, now)\n self.task.add_task_note(note)\n\n @property\n def config(self):\n \"\"\"Get my config.\n\n Returns a dict of the config for this task.\n \"\"\"\n if self._config is None:\n try:\n task_conf = CONF.workflow.tasks[self.task_type]\n except KeyError:\n task_conf = {}\n self._config = CONF.workflow.task_defaults.overlay(task_conf)\n return self._config\n\n def is_valid(self, internal_message=None):\n self._refresh_actions()\n valid = all([act.valid for act in self.actions])\n if not valid:\n # TODO(amelia): get action invalidation reasons and raise those\n raise exceptions.TaskActionsInvalid(\n self.task, \"actions invalid\", internal_message\n )\n\n @property\n def approved(self):\n return self.task.approved\n\n @property\n def completed(self):\n return self.task.completed\n\n @property\n def cancelled(self):\n return self.task.cancelled\n\n def confirm_state(self, approved=None, completed=None, cancelled=None):\n \"\"\"Check that the Task is in a given state.\n\n None value means state is ignored. 
Otherwise expects true or false.\n \"\"\"\n if completed is not None:\n if self.task.completed and not completed:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has already been completed.\"\n )\n if not self.task.completed and completed:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task hasn't been completed.\"\n )\n\n if cancelled is not None:\n if self.task.cancelled and not cancelled:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has been cancelled.\"\n )\n if not self.task.cancelled and cancelled:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has not been cancelled.\"\n )\n if approved is not None:\n if self.task.approved and not approved:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has already been approved.\"\n )\n if not self.task.approved and approved:\n raise exceptions.TaskStateInvalid(\n self.task, \"This task has not been approved.\"\n )\n\n def update(self, action_data):\n self.confirm_state(approved=False, completed=False, cancelled=False)\n\n action_serializer_list = self._instantiate_action_serializers(\n action_data, use_existing_actions=True\n )\n\n hash_key = self._create_task_hash(action_serializer_list)\n self._handle_duplicates(hash_key)\n\n for action in action_serializer_list:\n data = action[\"serializer\"].validated_data\n\n action[\"action\"].action.action_data = data\n action[\"action\"].action.save()\n self._refresh_actions()\n self.prepare()\n\n def prepare(self):\n \"\"\"Run the prepare stage for all the actions.\n\n If the task can be auto approved, this will also run the approve\n stage.\n \"\"\"\n\n self.confirm_state(approved=False, completed=False, cancelled=False)\n\n for action in self.actions:\n try:\n action.prepare()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while setting up task\")\n\n # send initial confirmation email:\n email_conf = self.config.emails.initial\n send_stage_email(self.task, email_conf)\n\n approve_list = [act.auto_approve for act in self.actions]\n\n # TODO(amelia): It would be nice to explicitly test this, however\n # currently we don't have the right combinations of\n # actions to allow for it.\n if False in approve_list:\n can_auto_approve = False\n elif True in approve_list:\n can_auto_approve = True\n else:\n can_auto_approve = False\n\n if self.config.allow_auto_approve is not None:\n allow_auto_approve = self.config.allow_auto_approve\n else:\n allow_auto_approve = self.allow_auto_approve\n\n if can_auto_approve and not allow_auto_approve:\n self.add_note(\"Actions allow auto aproval, but task does not.\")\n elif can_auto_approve:\n self.add_note(\"Action allow auto approval. 
Auto approving.\")\n self.approve()\n return\n\n if self.send_approval_notification:\n notes = {\"notes\": [\"'%s' task needs approval.\" % self.task_type]}\n create_notification(self.task, notes)\n\n def approve(self, approved_by=\"system\"):\n \"\"\"Run the approve stage for all the actions.\"\"\"\n\n self.confirm_state(completed=False, cancelled=False)\n\n self.is_valid(\"task invalid before approval\")\n\n # We approve the task before running actions,\n # that way if something goes wrong we know if it was approved,\n # when it was approved, and who approved it.\n self.task.approved = True\n self.task.approved_on = timezone.now()\n self.task.approved_by = approved_by\n self.task.save()\n\n # approve all actions\n for action in self.actions:\n try:\n action.approve()\n except Exception as e:\n handle_task_error(e, self.task, error_text=\"while approving task\")\n\n self.is_valid(\"task invalid after approval\")\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n else:\n self.submit()\n\n def reissue_token(self):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n\n need_token = any([act.need_token for act in self.actions])\n if need_token:\n self._create_token()\n\n def clear_tokens(self):\n for token in self.task.tokens:\n token.delete()\n\n def submit(self, token_data=None, keystone_user=None):\n self.confirm_state(approved=True, completed=False, cancelled=False)\n\n required_fields = set()\n actions = []\n for action in self.task.actions:\n a = action.get_action()\n actions.append(a)\n for field in a.token_fields:\n required_fields.add(field)\n\n if not token_data:\n token_data = {}\n\n errors = {}\n data = {}\n\n for field in required_fields:\n try:\n data[field] = token_data[field]\n except KeyError:\n errors[field] = [\n \"This field is required.\",\n ]\n except TypeError:\n errors = [\"Improperly formated json. \" \"Should be a key-value object.\"]\n break\n\n if errors:\n raise exceptions.TaskTokenSerializersInvalid(self.task, errors)\n\n self.is_valid(\"task invalid before submit\")\n\n for action in actions:\n try:\n action.submit(data, keystone_user)\n except Exception as e:\n handle_task_error(e, self.task, \"while submiting task\")\n\n self.is_valid(\"task invalid after submit\")\n\n self.task.completed = True\n self.task.completed_on = timezone.now()\n self.task.save()\n for token in self.task.tokens:\n token.delete()\n\n # Sending confirmation email:\n email_conf = self.config.emails.completed\n send_stage_email(self.task, email_conf)\n\n def cancel(self):\n self.confirm_state(completed=False, cancelled=False)\n self.clear_tokens()\n self.task.cancelled = True\n self.task.save()\n",
"step-ids": [
14,
17,
18,
20,
26
]
}
|
[
14,
17,
18,
20,
26
] |
from django.core import serializers
from django.db import models
from uuid import uuid4
from django.utils import timezone
from django.contrib.auth.models import User
class Message(models.Model):
uuid=models.CharField(max_length=50)
user=models.CharField(max_length=20)
message=models.CharField(max_length=200)
timestamp=models.DateTimeField()
def json_decode(self, jsondata):
self.uuid=jsondata['id']
self.message=jsondata['message']
self.user=jsondata['user']
self.timestamp=jsondata['timestamp']
def json_encode(self):
dict={}
dict['id']=self.uuid
dict['user']=self.user
dict['message']=self.message
dict['timestamp']=self.timestamp
return dict
def __unicode__(self):
return str(self.timestamp)+" "+self.user+": "+self.message
|
normal
|
{
"blob_id": "1476d4f488e6c55234a34dc5b6182e3b8ad4f702",
"index": 6201,
"step-1": "<mask token>\n\n\nclass Message(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Message(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def json_decode(self, jsondata):\n self.uuid = jsondata['id']\n self.message = jsondata['message']\n self.user = jsondata['user']\n self.timestamp = jsondata['timestamp']\n\n def json_encode(self):\n dict = {}\n dict['id'] = self.uuid\n dict['user'] = self.user\n dict['message'] = self.message\n dict['timestamp'] = self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp) + ' ' + self.user + ': ' + self.message\n",
"step-3": "<mask token>\n\n\nclass Message(models.Model):\n uuid = models.CharField(max_length=50)\n user = models.CharField(max_length=20)\n message = models.CharField(max_length=200)\n timestamp = models.DateTimeField()\n\n def json_decode(self, jsondata):\n self.uuid = jsondata['id']\n self.message = jsondata['message']\n self.user = jsondata['user']\n self.timestamp = jsondata['timestamp']\n\n def json_encode(self):\n dict = {}\n dict['id'] = self.uuid\n dict['user'] = self.user\n dict['message'] = self.message\n dict['timestamp'] = self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp) + ' ' + self.user + ': ' + self.message\n",
"step-4": "from django.core import serializers\nfrom django.db import models\nfrom uuid import uuid4\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\n\nclass Message(models.Model):\n uuid = models.CharField(max_length=50)\n user = models.CharField(max_length=20)\n message = models.CharField(max_length=200)\n timestamp = models.DateTimeField()\n\n def json_decode(self, jsondata):\n self.uuid = jsondata['id']\n self.message = jsondata['message']\n self.user = jsondata['user']\n self.timestamp = jsondata['timestamp']\n\n def json_encode(self):\n dict = {}\n dict['id'] = self.uuid\n dict['user'] = self.user\n dict['message'] = self.message\n dict['timestamp'] = self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp) + ' ' + self.user + ': ' + self.message\n",
"step-5": "from django.core import serializers\nfrom django.db import models\nfrom uuid import uuid4\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n\nclass Message(models.Model):\n uuid=models.CharField(max_length=50)\n user=models.CharField(max_length=20)\n message=models.CharField(max_length=200)\n timestamp=models.DateTimeField()\n \n def json_decode(self, jsondata):\n self.uuid=jsondata['id']\n self.message=jsondata['message']\n self.user=jsondata['user']\n self.timestamp=jsondata['timestamp']\n\n def json_encode(self):\n dict={}\n dict['id']=self.uuid\n dict['user']=self.user\n dict['message']=self.message\n dict['timestamp']=self.timestamp\n return dict\n\n def __unicode__(self):\n return str(self.timestamp)+\" \"+self.user+\": \"+self.message\n \n",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
def slices(series, length):
if length <= 0:
raise ValueError("Length has to be at least 1")
elif length > len(series) or len(series) == 0:
raise ValueError("Length has to be larger than len of series")
elif length == len(series):
return [series]
else:
result = []
for i in range(0, len(series) - length + 1):
result.append(series[i:i+length])
return result
|
normal
|
{
"blob_id": "207bb7c79de069ad5d980d18cdfc5c4ab86c5197",
"index": 6544,
"step-1": "<mask token>\n",
"step-2": "def slices(series, length):\n if length <= 0:\n raise ValueError('Length has to be at least 1')\n elif length > len(series) or len(series) == 0:\n raise ValueError('Length has to be larger than len of series')\n elif length == len(series):\n return [series]\n else:\n result = []\n for i in range(0, len(series) - length + 1):\n result.append(series[i:i + length])\n return result\n",
"step-3": "def slices(series, length):\n if length <= 0:\n raise ValueError(\"Length has to be at least 1\")\n elif length > len(series) or len(series) == 0:\n raise ValueError(\"Length has to be larger than len of series\")\n elif length == len(series):\n return [series]\n else:\n result = []\n for i in range(0, len(series) - length + 1):\n result.append(series[i:i+length])\n return result\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
@pulumi.output_type
class ApplicationCredential(dict):
<|reserved_special_token_0|>
def __getitem__(self, key: str) ->Any:
ApplicationCredential.__key_warning(key)
return super().__getitem__(key)
<|reserved_special_token_0|>
def __init__(__self__, *, credential_type: Optional[
'ApplicationCredentialCredentialType']=None, database_name:
Optional[str]=None, secret_id: Optional[str]=None):
if credential_type is not None:
pulumi.set(__self__, 'credential_type', credential_type)
if database_name is not None:
pulumi.set(__self__, 'database_name', database_name)
if secret_id is not None:
pulumi.set(__self__, 'secret_id', secret_id)
@property
@pulumi.getter(name='credentialType')
def credential_type(self) ->Optional['ApplicationCredentialCredentialType'
]:
return pulumi.get(self, 'credential_type')
@property
@pulumi.getter(name='databaseName')
def database_name(self) ->Optional[str]:
return pulumi.get(self, 'database_name')
<|reserved_special_token_0|>
@pulumi.output_type
class ApplicationTag(dict):
"""
A key-value pair to associate with a resource.
"""
def __init__(__self__, *, key: str, value: str):
"""
A key-value pair to associate with a resource.
:param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, 'key', key)
pulumi.set(__self__, 'value', value)
@property
@pulumi.getter
def key(self) ->str:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'key')
@property
@pulumi.getter
def value(self) ->str:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'value')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pulumi.output_type
class ApplicationCredential(dict):
<|reserved_special_token_0|>
def __getitem__(self, key: str) ->Any:
ApplicationCredential.__key_warning(key)
return super().__getitem__(key)
<|reserved_special_token_0|>
def __init__(__self__, *, credential_type: Optional[
'ApplicationCredentialCredentialType']=None, database_name:
Optional[str]=None, secret_id: Optional[str]=None):
if credential_type is not None:
pulumi.set(__self__, 'credential_type', credential_type)
if database_name is not None:
pulumi.set(__self__, 'database_name', database_name)
if secret_id is not None:
pulumi.set(__self__, 'secret_id', secret_id)
@property
@pulumi.getter(name='credentialType')
def credential_type(self) ->Optional['ApplicationCredentialCredentialType'
]:
return pulumi.get(self, 'credential_type')
@property
@pulumi.getter(name='databaseName')
def database_name(self) ->Optional[str]:
return pulumi.get(self, 'database_name')
@property
@pulumi.getter(name='secretId')
def secret_id(self) ->Optional[str]:
return pulumi.get(self, 'secret_id')
@pulumi.output_type
class ApplicationTag(dict):
"""
A key-value pair to associate with a resource.
"""
def __init__(__self__, *, key: str, value: str):
"""
A key-value pair to associate with a resource.
:param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, 'key', key)
pulumi.set(__self__, 'value', value)
@property
@pulumi.getter
def key(self) ->str:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'key')
@property
@pulumi.getter
def value(self) ->str:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'value')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pulumi.output_type
class ApplicationCredential(dict):
<|reserved_special_token_0|>
def __getitem__(self, key: str) ->Any:
ApplicationCredential.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default=None) ->Any:
ApplicationCredential.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *, credential_type: Optional[
'ApplicationCredentialCredentialType']=None, database_name:
Optional[str]=None, secret_id: Optional[str]=None):
if credential_type is not None:
pulumi.set(__self__, 'credential_type', credential_type)
if database_name is not None:
pulumi.set(__self__, 'database_name', database_name)
if secret_id is not None:
pulumi.set(__self__, 'secret_id', secret_id)
@property
@pulumi.getter(name='credentialType')
def credential_type(self) ->Optional['ApplicationCredentialCredentialType'
]:
return pulumi.get(self, 'credential_type')
@property
@pulumi.getter(name='databaseName')
def database_name(self) ->Optional[str]:
return pulumi.get(self, 'database_name')
@property
@pulumi.getter(name='secretId')
def secret_id(self) ->Optional[str]:
return pulumi.get(self, 'secret_id')
@pulumi.output_type
class ApplicationTag(dict):
"""
A key-value pair to associate with a resource.
"""
def __init__(__self__, *, key: str, value: str):
"""
A key-value pair to associate with a resource.
:param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, 'key', key)
pulumi.set(__self__, 'value', value)
@property
@pulumi.getter
def key(self) ->str:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'key')
@property
@pulumi.getter
def value(self) ->str:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'value')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@pulumi.output_type
class ApplicationCredential(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == 'credentialType':
suggest = 'credential_type'
elif key == 'databaseName':
suggest = 'database_name'
elif key == 'secretId':
suggest = 'secret_id'
if suggest:
pulumi.log.warn(
f"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead."
)
def __getitem__(self, key: str) ->Any:
ApplicationCredential.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default=None) ->Any:
ApplicationCredential.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *, credential_type: Optional[
'ApplicationCredentialCredentialType']=None, database_name:
Optional[str]=None, secret_id: Optional[str]=None):
if credential_type is not None:
pulumi.set(__self__, 'credential_type', credential_type)
if database_name is not None:
pulumi.set(__self__, 'database_name', database_name)
if secret_id is not None:
pulumi.set(__self__, 'secret_id', secret_id)
@property
@pulumi.getter(name='credentialType')
def credential_type(self) ->Optional['ApplicationCredentialCredentialType'
]:
return pulumi.get(self, 'credential_type')
@property
@pulumi.getter(name='databaseName')
def database_name(self) ->Optional[str]:
return pulumi.get(self, 'database_name')
@property
@pulumi.getter(name='secretId')
def secret_id(self) ->Optional[str]:
return pulumi.get(self, 'secret_id')
@pulumi.output_type
class ApplicationTag(dict):
"""
A key-value pair to associate with a resource.
"""
def __init__(__self__, *, key: str, value: str):
"""
A key-value pair to associate with a resource.
:param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, 'key', key)
pulumi.set(__self__, 'value', value)
@property
@pulumi.getter
def key(self) ->str:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'key')
@property
@pulumi.getter
def value(self) ->str:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, 'value')
<|reserved_special_token_1|>
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'ApplicationCredential',
'ApplicationTag',
]
@pulumi.output_type
class ApplicationCredential(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "credentialType":
suggest = "credential_type"
elif key == "databaseName":
suggest = "database_name"
elif key == "secretId":
suggest = "secret_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ApplicationCredential.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ApplicationCredential.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
credential_type: Optional['ApplicationCredentialCredentialType'] = None,
database_name: Optional[str] = None,
secret_id: Optional[str] = None):
if credential_type is not None:
pulumi.set(__self__, "credential_type", credential_type)
if database_name is not None:
pulumi.set(__self__, "database_name", database_name)
if secret_id is not None:
pulumi.set(__self__, "secret_id", secret_id)
@property
@pulumi.getter(name="credentialType")
def credential_type(self) -> Optional['ApplicationCredentialCredentialType']:
return pulumi.get(self, "credential_type")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> Optional[str]:
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="secretId")
def secret_id(self) -> Optional[str]:
return pulumi.get(self, "secret_id")
@pulumi.output_type
class ApplicationTag(dict):
"""
A key-value pair to associate with a resource.
"""
def __init__(__self__, *,
key: str,
value: str):
"""
A key-value pair to associate with a resource.
:param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
:param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def key(self) -> str:
"""
The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def value(self) -> str:
"""
The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
"""
return pulumi.get(self, "value")
|
flexible
|
{
"blob_id": "8535020e7157699310b3412fe6c5a28ee8e61f49",
"index": 6911,
"step-1": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n <mask token>\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n <mask token>\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-2": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n <mask token>\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-3": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n <mask token>\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default=None) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-4": "<mask token>\n\n\[email protected]_type\nclass ApplicationCredential(dict):\n\n @staticmethod\n def __key_warning(key: str):\n suggest = None\n if key == 'credentialType':\n suggest = 'credential_type'\n elif key == 'databaseName':\n suggest = 'database_name'\n elif key == 'secretId':\n suggest = 'secret_id'\n if suggest:\n pulumi.log.warn(\n f\"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.\"\n )\n\n def __getitem__(self, key: str) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default=None) ->Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *, credential_type: Optional[\n 'ApplicationCredentialCredentialType']=None, database_name:\n Optional[str]=None, secret_id: Optional[str]=None):\n if credential_type is not None:\n pulumi.set(__self__, 'credential_type', credential_type)\n if database_name is not None:\n pulumi.set(__self__, 'database_name', database_name)\n if secret_id is not None:\n pulumi.set(__self__, 'secret_id', secret_id)\n\n @property\n @pulumi.getter(name='credentialType')\n def credential_type(self) ->Optional['ApplicationCredentialCredentialType'\n ]:\n return pulumi.get(self, 'credential_type')\n\n @property\n @pulumi.getter(name='databaseName')\n def database_name(self) ->Optional[str]:\n return pulumi.get(self, 'database_name')\n\n @property\n @pulumi.getter(name='secretId')\n def secret_id(self) ->Optional[str]:\n return pulumi.get(self, 'secret_id')\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n\n def __init__(__self__, *, key: str, value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, 'key', key)\n pulumi.set(__self__, 'value', value)\n\n @property\n @pulumi.getter\n def key(self) ->str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'key')\n\n @property\n @pulumi.getter\n def value(self) ->str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, 'value')\n",
"step-5": "# coding=utf-8\n# *** WARNING: this file was generated by the Pulumi SDK Generator. ***\n# *** Do not edit by hand unless you're certain you know what you are doing! ***\n\nimport copy\nimport warnings\nimport pulumi\nimport pulumi.runtime\nfrom typing import Any, Mapping, Optional, Sequence, Union, overload\nfrom .. import _utilities\nfrom ._enums import *\n\n__all__ = [\n 'ApplicationCredential',\n 'ApplicationTag',\n]\n\[email protected]_type\nclass ApplicationCredential(dict):\n @staticmethod\n def __key_warning(key: str):\n suggest = None\n if key == \"credentialType\":\n suggest = \"credential_type\"\n elif key == \"databaseName\":\n suggest = \"database_name\"\n elif key == \"secretId\":\n suggest = \"secret_id\"\n\n if suggest:\n pulumi.log.warn(f\"Key '{key}' not found in ApplicationCredential. Access the value via the '{suggest}' property getter instead.\")\n\n def __getitem__(self, key: str) -> Any:\n ApplicationCredential.__key_warning(key)\n return super().__getitem__(key)\n\n def get(self, key: str, default = None) -> Any:\n ApplicationCredential.__key_warning(key)\n return super().get(key, default)\n\n def __init__(__self__, *,\n credential_type: Optional['ApplicationCredentialCredentialType'] = None,\n database_name: Optional[str] = None,\n secret_id: Optional[str] = None):\n if credential_type is not None:\n pulumi.set(__self__, \"credential_type\", credential_type)\n if database_name is not None:\n pulumi.set(__self__, \"database_name\", database_name)\n if secret_id is not None:\n pulumi.set(__self__, \"secret_id\", secret_id)\n\n @property\n @pulumi.getter(name=\"credentialType\")\n def credential_type(self) -> Optional['ApplicationCredentialCredentialType']:\n return pulumi.get(self, \"credential_type\")\n\n @property\n @pulumi.getter(name=\"databaseName\")\n def database_name(self) -> Optional[str]:\n return pulumi.get(self, \"database_name\")\n\n @property\n @pulumi.getter(name=\"secretId\")\n def secret_id(self) -> Optional[str]:\n return pulumi.get(self, \"secret_id\")\n\n\[email protected]_type\nclass ApplicationTag(dict):\n \"\"\"\n A key-value pair to associate with a resource.\n \"\"\"\n def __init__(__self__, *,\n key: str,\n value: str):\n \"\"\"\n A key-value pair to associate with a resource.\n :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n :param str value: The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n pulumi.set(__self__, \"key\", key)\n pulumi.set(__self__, \"value\", value)\n\n @property\n @pulumi.getter\n def key(self) -> str:\n \"\"\"\n The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. \n \"\"\"\n return pulumi.get(self, \"key\")\n\n @property\n @pulumi.getter\n def value(self) -> str:\n \"\"\"\n The value for the tag. You can specify a value that is 1 to 255 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -. 
\n \"\"\"\n return pulumi.get(self, \"value\")\n\n\n",
"step-ids": [
10,
11,
12,
13,
16
]
}
|
[
10,
11,
12,
13,
16
] |
#Q7. Write a program to calculate the sum of digits of a given number.
n=int(input("Enter a number:\n"))
sum=0
while(n>0):
r=n%10
sum=sum+r
n=n//10
print("The total sum of digits is:",sum)
|
normal
|
{
"blob_id": "78e3750a1bbe9f2f6680937729c1a810bd29fd4d",
"index": 4232,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile n > 0:\n r = n % 10\n sum = sum + r\n n = n // 10\nprint('The total sum of digits is:', sum)\n",
"step-3": "n = int(input('Enter a number:\\n'))\nsum = 0\nwhile n > 0:\n r = n % 10\n sum = sum + r\n n = n // 10\nprint('The total sum of digits is:', sum)\n",
"step-4": "#Q7. Write a program to calculate the sum of digits of a given number.\r\n\r\nn=int(input(\"Enter a number:\\n\"))\r\nsum=0\r\nwhile(n>0):\r\n r=n%10\r\n sum=sum+r\r\n n=n//10\r\nprint(\"The total sum of digits is:\",sum)\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def symetrisch(x, y):
"""
bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind
:param x: ein Element der Liste
:param y: ein Element der Liste
:return: True- wenn x und y symetrisch
False - sonst
"""
if x % 10 == y // 10 and x // 10 == y % 10:
return True
else:
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def symetrisch(x, y):
"""
bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind
:param x: ein Element der Liste
:param y: ein Element der Liste
:return: True- wenn x und y symetrisch
False - sonst
"""
if x % 10 == y // 10 and x // 10 == y % 10:
return True
else:
return False
def anz_von_sym(lst):
"""
mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste
:param lst: die Liste
:return: Anzahl der symetrischen Paaren der Liste
"""
anz = 0
for i in range(len(lst) - 1):
for j in range(i, len(lst)):
if symetrisch(lst[i], lst[j]):
anz += 1
print('Anzahl symmetrischer Paaren:', anz)
<|reserved_special_token_1|>
"""
2. Schreiben Sie die Anzahl von symmetrischen Paaren (xy) und (yx).
"""
def symetrisch(x, y):
"""
bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind
:param x: ein Element der Liste
:param y: ein Element der Liste
:return: True- wenn x und y symetrisch
False - sonst
"""
if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):
return True
else:
return False
def anz_von_sym(lst):
"""
mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste
:param lst: die Liste
:return: Anzahl der symetrischen Paaren der Liste
"""
anz = 0
for i in range(len(lst) - 1):
for j in range(i, len(lst)):
if symetrisch(lst[i], lst[j]):
anz += 1
print("Anzahl symmetrischer Paaren:", anz)
|
flexible
|
{
"blob_id": "2c6dc4d55f64d7c3c01b3f504a72904451cb4610",
"index": 6532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if x % 10 == y // 10 and x // 10 == y % 10:\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if x % 10 == y // 10 and x // 10 == y % 10:\n return True\n else:\n return False\n\n\ndef anz_von_sym(lst):\n \"\"\"\n mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste\n :param lst: die Liste\n :return: Anzahl der symetrischen Paaren der Liste\n \"\"\"\n anz = 0\n for i in range(len(lst) - 1):\n for j in range(i, len(lst)):\n if symetrisch(lst[i], lst[j]):\n anz += 1\n print('Anzahl symmetrischer Paaren:', anz)\n",
"step-4": "\"\"\"\n2. Schreiben Sie die Anzahl von symmetrischen Paaren (xy) und (yx).\n\"\"\"\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):\n return True\n else:\n return False\n\n\ndef anz_von_sym(lst):\n \"\"\"\n mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste\n :param lst: die Liste\n :return: Anzahl der symetrischen Paaren der Liste\n \"\"\"\n anz = 0\n for i in range(len(lst) - 1):\n for j in range(i, len(lst)):\n if symetrisch(lst[i], lst[j]):\n anz += 1\n print(\"Anzahl symmetrischer Paaren:\", anz)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class PyHookableTests(PyHookableMixin, unittest.TestCase):
def test_pure_python(self):
from zope.hookable import _PURE_PYTHON
from zope.hookable import _c_hookable
from zope.hookable import _py_hookable
from zope.hookable import hookable
self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)
def test_before_hook(self):
hooked = self._callFUT(return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_after_hook(self):
hooked = self._callFUT(not_called)
old = hooked.sethook(return_bar)
self.assertIs(old, not_called)
self.assertIs(hooked.original, not_called)
self.assertIs(hooked.implementation, return_bar)
self.assertEqual(hooked(), 'BAR')
def test_after_hook_and_reset(self):
hooked = self._callFUT(return_foo)
old = hooked.sethook(not_called)
hooked.reset()
self.assertIs(old, return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_no_args(self):
with self.assertRaises(TypeError):
self._callFUT()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def test_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertIsInstance(hooked(), C)
hooked.sethook(return_bar)
self.assertEqual(hooked(), 'BAR')
class TestIssue6Py(PyHookableMixin, unittest.TestCase):
def _check_preserves_doc(self, docs):
self.assertEqual('I have some docs', docs.__doc__)
hooked = self._callFUT(docs)
self.assertEqual(hooked.__doc__, docs.__doc__)
def test_preserves_doc_function(self):
def docs():
"""I have some docs"""
self._check_preserves_doc(docs)
def test_preserves_doc_class(self):
class Docs:
"""I have some docs"""
self._check_preserves_doc(Docs)
def test_empty_bases_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual((), hooked.__bases__)
def test_empty_dict_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual({}, hooked.__dict__)
def test_bases_class(self):
class C:
pass
self.assertEqual(C.__bases__, (object,))
hooked = self._callFUT(C)
self.assertEqual(hooked.__bases__, (object,))
def test_dict_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertEqual(hooked.__dict__, C.__dict__)
def test_non_string_attr_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(TypeError):
getattr(hooked, 42)
with self.assertRaises(TypeError):
hooked.__getattribute__(42)
def test_unicode_attribute_name(self):
hooked = self._callFUT(return_foo)
result = hooked.__getattribute__('__bases__')
self.assertEqual(result, ())
def test_short_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(AttributeError):
hooked.__getattribute__('')
class HookableTests(HookableMixin, PyHookableTests):
pass
class TestIssue6(HookableMixin, TestIssue6Py):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PyHookableTests(PyHookableMixin, unittest.TestCase):
def test_pure_python(self):
from zope.hookable import _PURE_PYTHON
from zope.hookable import _c_hookable
from zope.hookable import _py_hookable
from zope.hookable import hookable
self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)
def test_before_hook(self):
hooked = self._callFUT(return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_after_hook(self):
hooked = self._callFUT(not_called)
old = hooked.sethook(return_bar)
self.assertIs(old, not_called)
self.assertIs(hooked.original, not_called)
self.assertIs(hooked.implementation, return_bar)
self.assertEqual(hooked(), 'BAR')
def test_after_hook_and_reset(self):
hooked = self._callFUT(return_foo)
old = hooked.sethook(not_called)
hooked.reset()
self.assertIs(old, return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_original_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.original
<|reserved_special_token_0|>
def test_no_args(self):
with self.assertRaises(TypeError):
self._callFUT()
def test_too_many_args(self):
with self.assertRaises(TypeError):
self._callFUT(not_called, not_called)
def test_w_implementation_kwarg(self):
hooked = self._callFUT(implementation=return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_w_unknown_kwarg(self):
with self.assertRaises(TypeError):
self._callFUT(nonesuch=42)
def test_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertIsInstance(hooked(), C)
hooked.sethook(return_bar)
self.assertEqual(hooked(), 'BAR')
class TestIssue6Py(PyHookableMixin, unittest.TestCase):
def _check_preserves_doc(self, docs):
self.assertEqual('I have some docs', docs.__doc__)
hooked = self._callFUT(docs)
self.assertEqual(hooked.__doc__, docs.__doc__)
def test_preserves_doc_function(self):
def docs():
"""I have some docs"""
self._check_preserves_doc(docs)
def test_preserves_doc_class(self):
class Docs:
"""I have some docs"""
self._check_preserves_doc(Docs)
def test_empty_bases_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual((), hooked.__bases__)
def test_empty_dict_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual({}, hooked.__dict__)
def test_bases_class(self):
class C:
pass
self.assertEqual(C.__bases__, (object,))
hooked = self._callFUT(C)
self.assertEqual(hooked.__bases__, (object,))
def test_dict_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertEqual(hooked.__dict__, C.__dict__)
def test_non_string_attr_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(TypeError):
getattr(hooked, 42)
with self.assertRaises(TypeError):
hooked.__getattribute__(42)
def test_unicode_attribute_name(self):
hooked = self._callFUT(return_foo)
result = hooked.__getattribute__('__bases__')
self.assertEqual(result, ())
def test_short_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(AttributeError):
hooked.__getattribute__('')
class HookableTests(HookableMixin, PyHookableTests):
pass
class TestIssue6(HookableMixin, TestIssue6Py):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PyHookableMixin:
<|reserved_special_token_0|>
class HookableMixin:
def _callFUT(self, *args, **kw):
from zope.hookable import _py_hookable
from zope.hookable import hookable
if hookable is _py_hookable:
raise unittest.SkipTest('Hookable and PyHookable are the same')
return hookable(*args, **kw)
class PyHookableTests(PyHookableMixin, unittest.TestCase):
def test_pure_python(self):
from zope.hookable import _PURE_PYTHON
from zope.hookable import _c_hookable
from zope.hookable import _py_hookable
from zope.hookable import hookable
self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)
def test_before_hook(self):
hooked = self._callFUT(return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_after_hook(self):
hooked = self._callFUT(not_called)
old = hooked.sethook(return_bar)
self.assertIs(old, not_called)
self.assertIs(hooked.original, not_called)
self.assertIs(hooked.implementation, return_bar)
self.assertEqual(hooked(), 'BAR')
def test_after_hook_and_reset(self):
hooked = self._callFUT(return_foo)
old = hooked.sethook(not_called)
hooked.reset()
self.assertIs(old, return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_original_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.original
def test_implementation_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.implementation
def test_no_args(self):
with self.assertRaises(TypeError):
self._callFUT()
def test_too_many_args(self):
with self.assertRaises(TypeError):
self._callFUT(not_called, not_called)
def test_w_implementation_kwarg(self):
hooked = self._callFUT(implementation=return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_w_unknown_kwarg(self):
with self.assertRaises(TypeError):
self._callFUT(nonesuch=42)
def test_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertIsInstance(hooked(), C)
hooked.sethook(return_bar)
self.assertEqual(hooked(), 'BAR')
class TestIssue6Py(PyHookableMixin, unittest.TestCase):
def _check_preserves_doc(self, docs):
self.assertEqual('I have some docs', docs.__doc__)
hooked = self._callFUT(docs)
self.assertEqual(hooked.__doc__, docs.__doc__)
def test_preserves_doc_function(self):
def docs():
"""I have some docs"""
self._check_preserves_doc(docs)
def test_preserves_doc_class(self):
class Docs:
"""I have some docs"""
self._check_preserves_doc(Docs)
def test_empty_bases_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual((), hooked.__bases__)
def test_empty_dict_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual({}, hooked.__dict__)
def test_bases_class(self):
class C:
pass
self.assertEqual(C.__bases__, (object,))
hooked = self._callFUT(C)
self.assertEqual(hooked.__bases__, (object,))
def test_dict_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertEqual(hooked.__dict__, C.__dict__)
def test_non_string_attr_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(TypeError):
getattr(hooked, 42)
with self.assertRaises(TypeError):
hooked.__getattribute__(42)
def test_unicode_attribute_name(self):
hooked = self._callFUT(return_foo)
result = hooked.__getattribute__('__bases__')
self.assertEqual(result, ())
def test_short_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(AttributeError):
hooked.__getattribute__('')
class HookableTests(HookableMixin, PyHookableTests):
pass
class TestIssue6(HookableMixin, TestIssue6Py):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def return_bar():
return 'BAR'
<|reserved_special_token_0|>
class PyHookableMixin:
def _callFUT(self, *args, **kw):
from zope.hookable import _py_hookable
return _py_hookable(*args, **kw)
class HookableMixin:
def _callFUT(self, *args, **kw):
from zope.hookable import _py_hookable
from zope.hookable import hookable
if hookable is _py_hookable:
raise unittest.SkipTest('Hookable and PyHookable are the same')
return hookable(*args, **kw)
class PyHookableTests(PyHookableMixin, unittest.TestCase):
def test_pure_python(self):
from zope.hookable import _PURE_PYTHON
from zope.hookable import _c_hookable
from zope.hookable import _py_hookable
from zope.hookable import hookable
self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)
def test_before_hook(self):
hooked = self._callFUT(return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_after_hook(self):
hooked = self._callFUT(not_called)
old = hooked.sethook(return_bar)
self.assertIs(old, not_called)
self.assertIs(hooked.original, not_called)
self.assertIs(hooked.implementation, return_bar)
self.assertEqual(hooked(), 'BAR')
def test_after_hook_and_reset(self):
hooked = self._callFUT(return_foo)
old = hooked.sethook(not_called)
hooked.reset()
self.assertIs(old, return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_original_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.original
def test_implementation_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.implementation
def test_no_args(self):
with self.assertRaises(TypeError):
self._callFUT()
def test_too_many_args(self):
with self.assertRaises(TypeError):
self._callFUT(not_called, not_called)
def test_w_implementation_kwarg(self):
hooked = self._callFUT(implementation=return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_w_unknown_kwarg(self):
with self.assertRaises(TypeError):
self._callFUT(nonesuch=42)
def test_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertIsInstance(hooked(), C)
hooked.sethook(return_bar)
self.assertEqual(hooked(), 'BAR')
class TestIssue6Py(PyHookableMixin, unittest.TestCase):
def _check_preserves_doc(self, docs):
self.assertEqual('I have some docs', docs.__doc__)
hooked = self._callFUT(docs)
self.assertEqual(hooked.__doc__, docs.__doc__)
def test_preserves_doc_function(self):
def docs():
"""I have some docs"""
self._check_preserves_doc(docs)
def test_preserves_doc_class(self):
class Docs:
"""I have some docs"""
self._check_preserves_doc(Docs)
def test_empty_bases_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual((), hooked.__bases__)
def test_empty_dict_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual({}, hooked.__dict__)
def test_bases_class(self):
class C:
pass
self.assertEqual(C.__bases__, (object,))
hooked = self._callFUT(C)
self.assertEqual(hooked.__bases__, (object,))
def test_dict_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertEqual(hooked.__dict__, C.__dict__)
def test_non_string_attr_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(TypeError):
getattr(hooked, 42)
with self.assertRaises(TypeError):
hooked.__getattribute__(42)
def test_unicode_attribute_name(self):
hooked = self._callFUT(return_foo)
result = hooked.__getattribute__('__bases__')
self.assertEqual(result, ())
def test_short_name(self):
hooked = self._callFUT(return_foo)
with self.assertRaises(AttributeError):
hooked.__getattribute__('')
class HookableTests(HookableMixin, PyHookableTests):
pass
class TestIssue6(HookableMixin, TestIssue6Py):
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test the hookable support Extension
"""
import unittest
def return_foo():
return 'FOO'
def return_bar():
return 'BAR'
def not_called():
raise AssertionError("This should not be called")
class PyHookableMixin:
def _callFUT(self, *args, **kw):
from zope.hookable import _py_hookable
return _py_hookable(*args, **kw)
class HookableMixin:
def _callFUT(self, *args, **kw):
from zope.hookable import _py_hookable
from zope.hookable import hookable
if hookable is _py_hookable:
raise unittest.SkipTest("Hookable and PyHookable are the same")
return hookable(*args, **kw) # pragma: no cover
class PyHookableTests(PyHookableMixin,
unittest.TestCase):
def test_pure_python(self):
from zope.hookable import _PURE_PYTHON
from zope.hookable import _c_hookable
from zope.hookable import _py_hookable
from zope.hookable import hookable
self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)
def test_before_hook(self):
hooked = self._callFUT(return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_after_hook(self):
hooked = self._callFUT(not_called)
old = hooked.sethook(return_bar)
self.assertIs(old, not_called)
self.assertIs(hooked.original, not_called)
self.assertIs(hooked.implementation, return_bar)
self.assertEqual(hooked(), 'BAR')
def test_after_hook_and_reset(self):
hooked = self._callFUT(return_foo)
old = hooked.sethook(not_called)
hooked.reset()
self.assertIs(old, return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_original_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.original
def test_implementation_cannot_be_deleted(self):
hooked = self._callFUT(not_called)
with self.assertRaises((TypeError, AttributeError)):
del hooked.implementation
def test_no_args(self):
with self.assertRaises(TypeError):
self._callFUT()
def test_too_many_args(self):
with self.assertRaises(TypeError):
self._callFUT(not_called, not_called)
def test_w_implementation_kwarg(self):
hooked = self._callFUT(implementation=return_foo)
self.assertIs(hooked.original, return_foo)
self.assertIs(hooked.implementation, return_foo)
self.assertEqual(hooked(), 'FOO')
def test_w_unknown_kwarg(self):
with self.assertRaises(TypeError):
self._callFUT(nonesuch=42)
def test_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertIsInstance(hooked(), C)
hooked.sethook(return_bar)
self.assertEqual(hooked(), 'BAR')
class TestIssue6Py(PyHookableMixin,
unittest.TestCase):
# Make sphinx docs for hooked objects work.
# https://github.com/zopefoundation/zope.hookable/issues/6
# We need to proxy __doc__ to the original,
# and synthesize an empty __bases__ and a __dict__ attribute
# if they're not present.
def _check_preserves_doc(self, docs):
self.assertEqual("I have some docs", docs.__doc__)
hooked = self._callFUT(docs)
self.assertEqual(hooked.__doc__, docs.__doc__)
def test_preserves_doc_function(self):
def docs():
"""I have some docs"""
self._check_preserves_doc(docs)
def test_preserves_doc_class(self):
class Docs:
"""I have some docs"""
self._check_preserves_doc(Docs)
def test_empty_bases_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual((), hooked.__bases__)
def test_empty_dict_function(self):
hooked = self._callFUT(return_foo)
self.assertEqual({}, hooked.__dict__)
def test_bases_class(self):
class C:
pass
self.assertEqual(C.__bases__, (object,))
hooked = self._callFUT(C)
self.assertEqual(hooked.__bases__, (object,))
def test_dict_class(self):
class C:
pass
hooked = self._callFUT(C)
self.assertEqual(hooked.__dict__, C.__dict__)
def test_non_string_attr_name(self):
# Specifically for the C implementation, which has to deal with this
hooked = self._callFUT(return_foo)
with self.assertRaises(TypeError):
getattr(hooked, 42)
with self.assertRaises(TypeError):
hooked.__getattribute__(42)
def test_unicode_attribute_name(self):
# Specifically for the C implementation, which has to deal with this
hooked = self._callFUT(return_foo)
result = hooked.__getattribute__('__bases__')
self.assertEqual(result, ())
def test_short_name(self):
# Specifically for the C implementation, which has to deal with this
hooked = self._callFUT(return_foo)
with self.assertRaises(AttributeError):
hooked.__getattribute__('')


class HookableTests(HookableMixin, PyHookableTests):
pass


class TestIssue6(HookableMixin, TestIssue6Py):
pass


def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
|
flexible
|
{
"blob_id": "13c0af340c4fff815919d7cbb1cfd3116be13771",
"index": 7907,
"step-1": "<mask token>\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n <mask token>\n <mask token>\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n <mask token>\n <mask token>\n <mask token>\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n <mask token>\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass 
HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PyHookableMixin:\n <mask token>\n\n\nclass HookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n if hookable is _py_hookable:\n raise unittest.SkipTest('Hookable and PyHookable are the same')\n return hookable(*args, **kw)\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n\n def test_implementation_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.implementation\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = 
self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef return_bar():\n return 'BAR'\n\n\n<mask token>\n\n\nclass PyHookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n return _py_hookable(*args, **kw)\n\n\nclass HookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n if hookable is _py_hookable:\n raise unittest.SkipTest('Hookable and PyHookable are the same')\n return hookable(*args, **kw)\n\n\nclass PyHookableTests(PyHookableMixin, unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n\n def test_implementation_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.implementation\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n\n\n class C:\n pass\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin, unittest.TestCase):\n\n def _check_preserves_doc(self, docs):\n self.assertEqual('I have some docs', docs.__doc__)\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n\n\n class Docs:\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n\n\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n\n\n class 
C:\n pass\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\n<mask token>\n",
"step-5": "##############################################################################\n#\n# Copyright (c) 2003 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Test the hookable support Extension\n\"\"\"\nimport unittest\n\n\ndef return_foo():\n return 'FOO'\n\n\ndef return_bar():\n return 'BAR'\n\n\ndef not_called():\n raise AssertionError(\"This should not be called\")\n\n\nclass PyHookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n return _py_hookable(*args, **kw)\n\n\nclass HookableMixin:\n\n def _callFUT(self, *args, **kw):\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n if hookable is _py_hookable:\n raise unittest.SkipTest(\"Hookable and PyHookable are the same\")\n return hookable(*args, **kw) # pragma: no cover\n\n\nclass PyHookableTests(PyHookableMixin,\n unittest.TestCase):\n\n def test_pure_python(self):\n from zope.hookable import _PURE_PYTHON\n from zope.hookable import _c_hookable\n from zope.hookable import _py_hookable\n from zope.hookable import hookable\n self.assertIs(hookable, _py_hookable if _PURE_PYTHON else _c_hookable)\n\n def test_before_hook(self):\n hooked = self._callFUT(return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_after_hook(self):\n hooked = self._callFUT(not_called)\n old = hooked.sethook(return_bar)\n self.assertIs(old, not_called)\n self.assertIs(hooked.original, not_called)\n self.assertIs(hooked.implementation, return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n def test_after_hook_and_reset(self):\n hooked = self._callFUT(return_foo)\n old = hooked.sethook(not_called)\n hooked.reset()\n self.assertIs(old, return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_original_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.original\n\n def test_implementation_cannot_be_deleted(self):\n hooked = self._callFUT(not_called)\n with self.assertRaises((TypeError, AttributeError)):\n del hooked.implementation\n\n def test_no_args(self):\n with self.assertRaises(TypeError):\n self._callFUT()\n\n def test_too_many_args(self):\n with self.assertRaises(TypeError):\n self._callFUT(not_called, not_called)\n\n def test_w_implementation_kwarg(self):\n hooked = self._callFUT(implementation=return_foo)\n self.assertIs(hooked.original, return_foo)\n self.assertIs(hooked.implementation, return_foo)\n self.assertEqual(hooked(), 'FOO')\n\n def test_w_unknown_kwarg(self):\n with self.assertRaises(TypeError):\n self._callFUT(nonesuch=42)\n\n def test_class(self):\n class C:\n pass\n\n hooked = self._callFUT(C)\n self.assertIsInstance(hooked(), C)\n\n hooked.sethook(return_bar)\n self.assertEqual(hooked(), 'BAR')\n\n\nclass TestIssue6Py(PyHookableMixin,\n unittest.TestCase):\n # Make sphinx docs for hooked objects work.\n # 
https://github.com/zopefoundation/zope.hookable/issues/6\n # We need to proxy __doc__ to the original,\n # and synthesize an empty __bases__ and a __dict__ attribute\n # if they're not present.\n\n def _check_preserves_doc(self, docs):\n self.assertEqual(\"I have some docs\", docs.__doc__)\n\n hooked = self._callFUT(docs)\n self.assertEqual(hooked.__doc__, docs.__doc__)\n\n def test_preserves_doc_function(self):\n def docs():\n \"\"\"I have some docs\"\"\"\n self._check_preserves_doc(docs)\n\n def test_preserves_doc_class(self):\n class Docs:\n \"\"\"I have some docs\"\"\"\n\n self._check_preserves_doc(Docs)\n\n def test_empty_bases_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual((), hooked.__bases__)\n\n def test_empty_dict_function(self):\n hooked = self._callFUT(return_foo)\n self.assertEqual({}, hooked.__dict__)\n\n def test_bases_class(self):\n class C:\n pass\n self.assertEqual(C.__bases__, (object,))\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__bases__, (object,))\n\n def test_dict_class(self):\n class C:\n pass\n\n hooked = self._callFUT(C)\n self.assertEqual(hooked.__dict__, C.__dict__)\n\n def test_non_string_attr_name(self):\n # Specifically for the C implementation, which has to deal with this\n hooked = self._callFUT(return_foo)\n with self.assertRaises(TypeError):\n getattr(hooked, 42)\n\n with self.assertRaises(TypeError):\n hooked.__getattribute__(42)\n\n def test_unicode_attribute_name(self):\n # Specifically for the C implementation, which has to deal with this\n hooked = self._callFUT(return_foo)\n result = hooked.__getattribute__('__bases__')\n self.assertEqual(result, ())\n\n def test_short_name(self):\n # Specifically for the C implementation, which has to deal with this\n hooked = self._callFUT(return_foo)\n with self.assertRaises(AttributeError):\n hooked.__getattribute__('')\n\n\nclass HookableTests(HookableMixin, PyHookableTests):\n pass\n\n\nclass TestIssue6(HookableMixin, TestIssue6Py):\n pass\n\n\ndef test_suite():\n return unittest.defaultTestLoader.loadTestsFromName(__name__)\n",
"step-ids": [
20,
24,
28,
30,
35
]
}
|
[
20,
24,
28,
30,
35
] |
<|reserved_special_token_0|>
def compare(mystring):
def usd_to_ngn():
print('Getting USD to NGN Rate')
req = requests.get(
'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'
)
req.raise_for_status()
res = str(req.content)[2:-1]
res = json.loads(res)
rate = float(res['results']['USD_NGN']['val'])
return rate
def amazon(mystring):
search_term = mystring.replace(' ', '+')
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.
format(search_term), headers=header)
time.sleep(10)
page_html2 = uReq(html).read()
page_soup = soup(page_html2, 'html.parser')
price_tags1 = page_soup.select('span.a-offscreen')
prices = [el.get_text() for el in price_tags1]
prices = [''.join(re.findall('([\\S]?)([0-9\\.]+)', i)[0]) for i in
prices]
rate = usd_to_ngn()
prices = [(float(i[1:]) * rate) for i in prices]
return prices
def konga(mystring):
search_term = mystring.replace(' ', '+')
my_url = 'https://www.konga.com/search?search='
new = my_url + search_term
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
request = Request(new, headers=header)
time.sleep(10)
response = uReq(request).read()
page_soup = soup(response, 'html.parser')
price_tags = page_soup.select('span.d7c0f_sJAqi')
prices = [float(str(el.contents[1]).replace(',', '')) for el in
price_tags[:30]]
return prices
konga = konga(mystring)
amazon = amazon(mystring)
"""
if len(konga) > len(alibaba) > 0:
konga = konga[:len(alibaba)]
elif len(konga) > 0:
alibaba = alibaba[:len(konga)]
"""
def find_avg(lst):
if len(lst) < 1:
return None
avg = 0
for i in lst:
avg += i
return avg / len(lst)
obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg
(amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if
len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if
len(amazon) < 1 else amazon}
print(obj)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def compare(mystring):
def usd_to_ngn():
print('Getting USD to NGN Rate')
req = requests.get(
'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'
)
req.raise_for_status()
res = str(req.content)[2:-1]
res = json.loads(res)
rate = float(res['results']['USD_NGN']['val'])
return rate
def amazon(mystring):
search_term = mystring.replace(' ', '+')
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.
format(search_term), headers=header)
time.sleep(10)
page_html2 = uReq(html).read()
page_soup = soup(page_html2, 'html.parser')
price_tags1 = page_soup.select('span.a-offscreen')
prices = [el.get_text() for el in price_tags1]
prices = [''.join(re.findall('([\\S]?)([0-9\\.]+)', i)[0]) for i in
prices]
rate = usd_to_ngn()
prices = [(float(i[1:]) * rate) for i in prices]
return prices
def konga(mystring):
search_term = mystring.replace(' ', '+')
my_url = 'https://www.konga.com/search?search='
new = my_url + search_term
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
request = Request(new, headers=header)
time.sleep(10)
response = uReq(request).read()
page_soup = soup(response, 'html.parser')
price_tags = page_soup.select('span.d7c0f_sJAqi')
prices = [float(str(el.contents[1]).replace(',', '')) for el in
price_tags[:30]]
return prices
konga = konga(mystring)
amazon = amazon(mystring)
"""
if len(konga) > len(alibaba) > 0:
konga = konga[:len(alibaba)]
elif len(konga) > 0:
alibaba = alibaba[:len(konga)]
"""
def find_avg(lst):
if len(lst) < 1:
return None
avg = 0
for i in lst:
avg += i
return avg / len(lst)
obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg
(amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if
len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if
len(amazon) < 1 else amazon}
print(obj)
if len(sys.argv) > 1:
compare(' '.join(sys.argv[1:]))
<|reserved_special_token_0|>
compare(term)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def compare(mystring):
def usd_to_ngn():
print('Getting USD to NGN Rate')
req = requests.get(
'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'
)
req.raise_for_status()
res = str(req.content)[2:-1]
res = json.loads(res)
rate = float(res['results']['USD_NGN']['val'])
return rate
def amazon(mystring):
search_term = mystring.replace(' ', '+')
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.
format(search_term), headers=header)
time.sleep(10)
page_html2 = uReq(html).read()
page_soup = soup(page_html2, 'html.parser')
price_tags1 = page_soup.select('span.a-offscreen')
prices = [el.get_text() for el in price_tags1]
prices = [''.join(re.findall('([\\S]?)([0-9\\.]+)', i)[0]) for i in
prices]
rate = usd_to_ngn()
prices = [(float(i[1:]) * rate) for i in prices]
return prices
def konga(mystring):
search_term = mystring.replace(' ', '+')
my_url = 'https://www.konga.com/search?search='
new = my_url + search_term
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
request = Request(new, headers=header)
time.sleep(10)
response = uReq(request).read()
page_soup = soup(response, 'html.parser')
price_tags = page_soup.select('span.d7c0f_sJAqi')
prices = [float(str(el.contents[1]).replace(',', '')) for el in
price_tags[:30]]
return prices
konga = konga(mystring)
amazon = amazon(mystring)
"""
if len(konga) > len(alibaba) > 0:
konga = konga[:len(alibaba)]
elif len(konga) > 0:
alibaba = alibaba[:len(konga)]
"""
def find_avg(lst):
if len(lst) < 1:
return None
avg = 0
for i in lst:
avg += i
return avg / len(lst)
obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg
(amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if
len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if
len(amazon) < 1 else amazon}
print(obj)
if len(sys.argv) > 1:
compare(' '.join(sys.argv[1:]))
term = str(input('enter your search term: '))
compare(term)
<|reserved_special_token_1|>
import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import pandas as pd
import time
from urllib.request import Request
import requests
import json
import re
import sys
def compare(mystring):
def usd_to_ngn():
print('Getting USD to NGN Rate')
req = requests.get(
'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'
)
req.raise_for_status()
res = str(req.content)[2:-1]
res = json.loads(res)
rate = float(res['results']['USD_NGN']['val'])
return rate
def amazon(mystring):
search_term = mystring.replace(' ', '+')
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.
format(search_term), headers=header)
time.sleep(10)
page_html2 = uReq(html).read()
page_soup = soup(page_html2, 'html.parser')
price_tags1 = page_soup.select('span.a-offscreen')
prices = [el.get_text() for el in price_tags1]
prices = [''.join(re.findall('([\\S]?)([0-9\\.]+)', i)[0]) for i in
prices]
rate = usd_to_ngn()
prices = [(float(i[1:]) * rate) for i in prices]
return prices
def konga(mystring):
search_term = mystring.replace(' ', '+')
my_url = 'https://www.konga.com/search?search='
new = my_url + search_term
header = {'User-agent':
'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'
}
request = Request(new, headers=header)
time.sleep(10)
response = uReq(request).read()
page_soup = soup(response, 'html.parser')
price_tags = page_soup.select('span.d7c0f_sJAqi')
prices = [float(str(el.contents[1]).replace(',', '')) for el in
price_tags[:30]]
return prices
konga = konga(mystring)
amazon = amazon(mystring)
"""
if len(konga) > len(alibaba) > 0:
konga = konga[:len(alibaba)]
elif len(konga) > 0:
alibaba = alibaba[:len(konga)]
"""
def find_avg(lst):
if len(lst) < 1:
return None
avg = 0
for i in lst:
avg += i
return avg / len(lst)
obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg
(amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if
len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if
len(amazon) < 1 else amazon}
print(obj)
if len(sys.argv) > 1:
compare(' '.join(sys.argv[1:]))
term = str(input('enter your search term: '))
compare(term)
<|reserved_special_token_1|>
import bs4
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import pandas as pd
import time
from urllib.request import Request
import requests
import json
import re
import sys
def compare(mystring):
def usd_to_ngn():
print("Getting USD to NGN Rate")
req = requests.get("http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63")
req.raise_for_status()
res = str(req.content)[2:-1]
res = json.loads(res)
rate = float(res['results']['USD_NGN']['val'])
return rate
def amazon(mystring):
search_term = mystring.replace(" ", "+")
header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}
html = Request("https://www.amazon.com/s?k={}&ref=nb_sb_noss_1".format(search_term), headers=header)
time.sleep(10)
page_html2 = uReq(html).read()
page_soup = soup(page_html2, 'html.parser')
price_tags1 = page_soup.select('span.a-offscreen')
prices = [el.get_text() for el in price_tags1] # get text
# print(f"1 : {prices}")
prices = ["".join(re.findall("([\S]?)([0-9\.]+)", i)[0]) for i in prices]
# ^ remove spaces, and get the price range minimum, with the currency
rate = usd_to_ngn()
prices = [(float(i[1:]) * rate) for i in prices]
return prices
def konga(mystring):
#mystring = (input('enter your search term: '))
search_term = mystring.replace(" ", "+")
my_url = 'https://www.konga.com/search?search='
new = my_url+search_term
header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}
#print(new)
request = Request(new, headers=header)
time.sleep(10)
response = uReq(request).read()
page_soup = soup(response, 'html.parser')
#price_containers = page_soup.find_all('span', {'class':'d7c0f_sJAqi'})
#containers = page_soup.find_all('div', {'class':'af885_1iPzH'})
price_tags = page_soup.select("span.d7c0f_sJAqi")
prices = [float(str(el.contents[1]).replace(",", "")) for el in price_tags[:30]]
return prices
konga = konga(mystring)
# print(konga)
amazon = amazon(mystring)
# print(alibaba)
"""
if len(konga) > len(alibaba) > 0:
konga = konga[:len(alibaba)]
elif len(konga) > 0:
alibaba = alibaba[:len(konga)]
"""
def find_avg(lst):
if len(lst) < 1:
return None
avg = 0
for i in lst:
avg += i
return avg / len(lst)
obj = {"avg_konga_price": find_avg(konga), "avg_Amazon_price": find_avg(amazon),
"currency" : "NGN",
'konga' : ("Unable To Fetch Prices" if (len(konga) < 1) else konga),
'amazon' : ("Unable To Fetch Prices" if (len(amazon) < 1) else amazon)}
# print(f"k = {konga} : a = {alibaba}")
print(obj)
if len(sys.argv) > 1:
compare(" ".join(sys.argv[1:]))
# Uncomment the code below to run a test with query='diamond jewelry'
term = str(input('enter your search term: '))
compare(term)
|
flexible
|
{
"blob_id": "d96038a715406388b4de4611391dee18fc559d5a",
"index": 2693,
"step-1": "<mask token>\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\nif len(sys.argv) > 1:\n compare(' '.join(sys.argv[1:]))\n<mask token>\ncompare(term)\n",
"step-3": "<mask token>\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\nif len(sys.argv) > 1:\n compare(' '.join(sys.argv[1:]))\nterm = str(input('enter your search term: '))\ncompare(term)\n",
"step-4": "import bs4\nfrom urllib.request import urlopen as uReq\nfrom bs4 import BeautifulSoup as soup\nimport pandas as pd\nimport time\nfrom urllib.request import Request\nimport requests\nimport json\nimport re\nimport sys\n\n\ndef compare(mystring):\n\n def usd_to_ngn():\n print('Getting USD to NGN Rate')\n req = requests.get(\n 'http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63'\n )\n req.raise_for_status()\n res = str(req.content)[2:-1]\n res = json.loads(res)\n rate = float(res['results']['USD_NGN']['val'])\n return rate\n\n def amazon(mystring):\n search_term = mystring.replace(' ', '+')\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n html = Request('https://www.amazon.com/s?k={}&ref=nb_sb_noss_1'.\n format(search_term), headers=header)\n time.sleep(10)\n page_html2 = uReq(html).read()\n page_soup = soup(page_html2, 'html.parser')\n price_tags1 = page_soup.select('span.a-offscreen')\n prices = [el.get_text() for el in price_tags1]\n prices = [''.join(re.findall('([\\\\S]?)([0-9\\\\.]+)', i)[0]) for i in\n prices]\n rate = usd_to_ngn()\n prices = [(float(i[1:]) * rate) for i in prices]\n return prices\n\n def konga(mystring):\n search_term = mystring.replace(' ', '+')\n my_url = 'https://www.konga.com/search?search='\n new = my_url + search_term\n header = {'User-agent':\n 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'\n }\n request = Request(new, headers=header)\n time.sleep(10)\n response = uReq(request).read()\n page_soup = soup(response, 'html.parser')\n price_tags = page_soup.select('span.d7c0f_sJAqi')\n prices = [float(str(el.contents[1]).replace(',', '')) for el in\n price_tags[:30]]\n return prices\n konga = konga(mystring)\n amazon = amazon(mystring)\n \"\"\"\n if len(konga) > len(alibaba) > 0:\n konga = konga[:len(alibaba)]\n elif len(konga) > 0:\n alibaba = alibaba[:len(konga)]\n \"\"\"\n\n def find_avg(lst):\n if len(lst) < 1:\n return None\n avg = 0\n for i in lst:\n avg += i\n return avg / len(lst)\n obj = {'avg_konga_price': find_avg(konga), 'avg_Amazon_price': find_avg\n (amazon), 'currency': 'NGN', 'konga': 'Unable To Fetch Prices' if \n len(konga) < 1 else konga, 'amazon': 'Unable To Fetch Prices' if \n len(amazon) < 1 else amazon}\n print(obj)\n\n\nif len(sys.argv) > 1:\n compare(' '.join(sys.argv[1:]))\nterm = str(input('enter your search term: '))\ncompare(term)\n",
"step-5": "import bs4\r\nfrom urllib.request import urlopen as uReq\r\nfrom bs4 import BeautifulSoup as soup\r\nimport pandas as pd\r\nimport time\r\nfrom urllib.request import Request\r\nimport requests\r\nimport json\r\nimport re\r\nimport sys\r\n\r\n\r\ndef compare(mystring):\r\n def usd_to_ngn():\r\n print(\"Getting USD to NGN Rate\")\r\n req = requests.get(\"http://free.currconv.com/api/v7/convert?q=USD_NGN&apiKey=5029a99b396929294f63\")\r\n req.raise_for_status()\r\n\r\n res = str(req.content)[2:-1]\r\n res = json.loads(res)\r\n\r\n rate = float(res['results']['USD_NGN']['val'])\r\n return rate\r\n \r\n def amazon(mystring):\r\n search_term = mystring.replace(\" \", \"+\")\r\n header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}\r\n html = Request(\"https://www.amazon.com/s?k={}&ref=nb_sb_noss_1\".format(search_term), headers=header)\r\n time.sleep(10)\r\n page_html2 = uReq(html).read()\r\n page_soup = soup(page_html2, 'html.parser')\r\n price_tags1 = page_soup.select('span.a-offscreen')\r\n prices = [el.get_text() for el in price_tags1] # get text\r\n # print(f\"1 : {prices}\")\r\n prices = [\"\".join(re.findall(\"([\\S]?)([0-9\\.]+)\", i)[0]) for i in prices]\r\n # ^ remove spaces, and get the price range minimum, with the currency\r\n rate = usd_to_ngn()\r\n prices = [(float(i[1:]) * rate) for i in prices] \r\n return prices\r\n\r\n \r\n \r\n def konga(mystring):\r\n #mystring = (input('enter your search term: '))\r\n search_term = mystring.replace(\" \", \"+\")\r\n my_url = 'https://www.konga.com/search?search='\r\n new = my_url+search_term\r\n header = {'User-agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.1.5) Gecko/20091102 Firefox/3.5.5'}\r\n #print(new)\r\n request = Request(new, headers=header)\r\n time.sleep(10)\r\n response = uReq(request).read()\r\n page_soup = soup(response, 'html.parser')\r\n #price_containers = page_soup.find_all('span', {'class':'d7c0f_sJAqi'})\r\n #containers = page_soup.find_all('div', {'class':'af885_1iPzH'})\r\n price_tags = page_soup.select(\"span.d7c0f_sJAqi\")\r\n prices = [float(str(el.contents[1]).replace(\",\", \"\")) for el in price_tags[:30]]\r\n return prices\r\n\r\n \r\n \r\n konga = konga(mystring)\r\n # print(konga)\r\n amazon = amazon(mystring)\r\n # print(alibaba)\r\n \"\"\"\r\n if len(konga) > len(alibaba) > 0:\r\n konga = konga[:len(alibaba)]\r\n elif len(konga) > 0:\r\n alibaba = alibaba[:len(konga)]\r\n \"\"\"\r\n def find_avg(lst):\r\n if len(lst) < 1:\r\n return None\r\n avg = 0\r\n for i in lst:\r\n avg += i\r\n return avg / len(lst)\r\n\r\n obj = {\"avg_konga_price\": find_avg(konga), \"avg_Amazon_price\": find_avg(amazon),\r\n \"currency\" : \"NGN\",\r\n 'konga' : (\"Unable To Fetch Prices\" if (len(konga) < 1) else konga),\r\n 'amazon' : (\"Unable To Fetch Prices\" if (len(amazon) < 1) else amazon)}\r\n # print(f\"k = {konga} : a = {alibaba}\")\r\n print(obj)\r\n\r\n\r\nif len(sys.argv) > 1:\r\n compare(\" \".join(sys.argv[1:]))\r\n\r\n# Uncomment the code below to run a test with query='diamond jewelry'\r\nterm = str(input('enter your search term: '))\r\ncompare(term)\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('healthviolations.csv', 'w') as fp:
with open('Restaurant_Inspections.csv', 'rb') as csvfile:
reader = csv.reader(csvfile)
header = next(reader, None)
writer = csv.writer(fp, delimiter=',')
writer.writerow([header[0], 'violation'])
for row in reader:
if row[20] != '':
violationarr = row[20].split(',')
for violation in violationarr:
writer.writerow([row[0], violation])
<|reserved_special_token_1|>
import csv
with open('healthviolations.csv', 'w') as fp:
with open('Restaurant_Inspections.csv', 'rb') as csvfile:
reader = csv.reader(csvfile)
header = next(reader, None)
writer = csv.writer(fp, delimiter=',')
writer.writerow([header[0], 'violation'])
for row in reader:
if row[20] != '':
violationarr = row[20].split(',')
for violation in violationarr:
writer.writerow([row[0], violation])
<|reserved_special_token_1|>
import csv
with open('healthviolations.csv', 'w') as fp:
with open('Restaurant_Inspections.csv', 'rb') as csvfile:
reader = csv.reader(csvfile)
header = next(reader, None)
writer = csv.writer(fp, delimiter=',')
writer.writerow([header[0], "violation"])
for row in reader:
if (row[20] != '') :
violationarr = row[20].split(",")
for violation in violationarr :
writer.writerow([row[0], violation])
# writer.writerow([header[0], header[1], "violation"])
# writer.writerow([row[0], row[1], violation])
|
flexible
|
{
"blob_id": "1825b365032a224ed56a1814d7f6457e2add8fdd",
"index": 8008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('healthviolations.csv', 'w') as fp:\n with open('Restaurant_Inspections.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n header = next(reader, None)\n writer = csv.writer(fp, delimiter=',')\n writer.writerow([header[0], 'violation'])\n for row in reader:\n if row[20] != '':\n violationarr = row[20].split(',')\n for violation in violationarr:\n writer.writerow([row[0], violation])\n",
"step-3": "import csv\nwith open('healthviolations.csv', 'w') as fp:\n with open('Restaurant_Inspections.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n header = next(reader, None)\n writer = csv.writer(fp, delimiter=',')\n writer.writerow([header[0], 'violation'])\n for row in reader:\n if row[20] != '':\n violationarr = row[20].split(',')\n for violation in violationarr:\n writer.writerow([row[0], violation])\n",
"step-4": "import csv\n\nwith open('healthviolations.csv', 'w') as fp:\n\n with open('Restaurant_Inspections.csv', 'rb') as csvfile:\n reader = csv.reader(csvfile)\n header = next(reader, None)\n writer = csv.writer(fp, delimiter=',')\n writer.writerow([header[0], \"violation\"])\n for row in reader:\n if (row[20] != '') :\n violationarr = row[20].split(\",\")\n for violation in violationarr :\n writer.writerow([row[0], violation])\n\n\n# writer.writerow([header[0], header[1], \"violation\"])\n# writer.writerow([row[0], row[1], violation])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_key(name_of_key):
print('Discovering API Key')
response = api.get_api_keys(includeValues=True)
items = response['items']
for item in items:
if name_of_key in item['name']:
return item['value']
def get_url(name_of_stack):
print('Discovering Cloudformation Exports')
exports = cf.list_exports()['Exports']
for export in exports:
if export['Name'] == 'url-{}'.format(name_of_stack):
return export['Value']
def post(url, key, data):
data_json = json.dumps(data)
headers = {'Content-type': 'application/json', 'x-api-key': key}
return requests.post(url, data=data_json, headers=headers)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_key(name_of_key):
print('Discovering API Key')
response = api.get_api_keys(includeValues=True)
items = response['items']
for item in items:
if name_of_key in item['name']:
return item['value']
def get_url(name_of_stack):
print('Discovering Cloudformation Exports')
exports = cf.list_exports()['Exports']
for export in exports:
if export['Name'] == 'url-{}'.format(name_of_stack):
return export['Value']
def post(url, key, data):
data_json = json.dumps(data)
headers = {'Content-type': 'application/json', 'x-api-key': key}
return requests.post(url, data=data_json, headers=headers)
if __name__ == '__main__':
name = 'advanced'
full_url = get_url(name)
api_key = get_key(name)
while True:
body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}
print(post(full_url, api_key, body))
time.sleep(1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
profile_name = 'mine'
region = 'us-west-2'
session = boto3.Session(profile_name=profile_name)
api = session.client('apigateway', region_name=region)
cf = session.client('cloudformation', region_name=region)
def get_key(name_of_key):
print('Discovering API Key')
response = api.get_api_keys(includeValues=True)
items = response['items']
for item in items:
if name_of_key in item['name']:
return item['value']
def get_url(name_of_stack):
print('Discovering Cloudformation Exports')
exports = cf.list_exports()['Exports']
for export in exports:
if export['Name'] == 'url-{}'.format(name_of_stack):
return export['Value']
def post(url, key, data):
data_json = json.dumps(data)
headers = {'Content-type': 'application/json', 'x-api-key': key}
return requests.post(url, data=data_json, headers=headers)
if __name__ == '__main__':
name = 'advanced'
full_url = get_url(name)
api_key = get_key(name)
while True:
body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}
print(post(full_url, api_key, body))
time.sleep(1)
<|reserved_special_token_1|>
import json
import requests
import boto3
import uuid
import time
profile_name = 'mine'
region = 'us-west-2'
session = boto3.Session(profile_name=profile_name)
api = session.client('apigateway', region_name=region)
cf = session.client('cloudformation', region_name=region)
def get_key(name_of_key):
print('Discovering API Key')
response = api.get_api_keys(includeValues=True)
items = response['items']
for item in items:
if name_of_key in item['name']:
return item['value']
def get_url(name_of_stack):
print('Discovering Cloudformation Exports')
exports = cf.list_exports()['Exports']
for export in exports:
if export['Name'] == 'url-{}'.format(name_of_stack):
return export['Value']
def post(url, key, data):
data_json = json.dumps(data)
headers = {'Content-type': 'application/json', 'x-api-key': key}
return requests.post(url, data=data_json, headers=headers)
if __name__ == '__main__':
name = 'advanced'
full_url = get_url(name)
api_key = get_key(name)
while True:
body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}
print(post(full_url, api_key, body))
time.sleep(1)
<|reserved_special_token_1|>
import json
import requests
import boto3
import uuid
import time
profile_name = 'mine'
region = 'us-west-2'
session = boto3.Session(profile_name=profile_name)
api = session.client('apigateway', region_name=region)
cf = session.client('cloudformation', region_name=region)
def get_key(name_of_key):
print('Discovering API Key')
response = api.get_api_keys(includeValues=True)
items = response['items']
for item in items:
if name_of_key in item['name']:
return item['value']
def get_url(name_of_stack):
print('Discovering Cloudformation Exports')
exports = cf.list_exports()['Exports']
for export in exports:
if export['Name'] == 'url-{}'.format(name_of_stack):
return export['Value']
def post(url, key, data):
data_json = json.dumps(data)
headers = {'Content-type': 'application/json', 'x-api-key': key}
return requests.post(url, data=data_json, headers=headers)
if __name__ == "__main__":
name = 'advanced'
full_url = get_url(name)
api_key = get_key(name)
while True:
body = {
"input": [
str(uuid.uuid4()),
str(uuid.uuid4())
]
}
print(post(full_url, api_key, body))
time.sleep(1)
|
flexible
|
{
"blob_id": "10fda09f47c292cb3dc901f42d38ead7757460f5",
"index": 3699,
"step-1": "<mask token>\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-3": "<mask token>\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-4": "import json\nimport requests\nimport boto3\nimport uuid\nimport time\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == '__main__':\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {'input': [str(uuid.uuid4()), str(uuid.uuid4())]}\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-5": "import json\nimport requests\nimport boto3\nimport uuid\nimport time\n\nprofile_name = 'mine'\nregion = 'us-west-2'\nsession = boto3.Session(profile_name=profile_name)\napi = session.client('apigateway', region_name=region)\ncf = session.client('cloudformation', region_name=region)\n\n\ndef get_key(name_of_key):\n print('Discovering API Key')\n response = api.get_api_keys(includeValues=True)\n items = response['items']\n for item in items:\n if name_of_key in item['name']:\n return item['value']\n\n\ndef get_url(name_of_stack):\n print('Discovering Cloudformation Exports')\n exports = cf.list_exports()['Exports']\n for export in exports:\n if export['Name'] == 'url-{}'.format(name_of_stack):\n return export['Value']\n\n\ndef post(url, key, data):\n data_json = json.dumps(data)\n headers = {'Content-type': 'application/json', 'x-api-key': key}\n return requests.post(url, data=data_json, headers=headers)\n\n\nif __name__ == \"__main__\":\n name = 'advanced'\n full_url = get_url(name)\n api_key = get_key(name)\n while True:\n body = {\n \"input\": [\n str(uuid.uuid4()),\n str(uuid.uuid4())\n ]\n }\n print(post(full_url, api_key, body))\n time.sleep(1)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class ConfigTestcase(unittest.TestCase):
<|reserved_special_token_0|>
def test_generate_zone_neighbours_one_country_one_subzone(self):
exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}
zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':
['DE']})
def test_generate_zone_neighbours_two_subzones(self):
exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},
'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',
'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},
'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',
'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},
'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],
'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':
['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})
def test_generate_zone_neighbours_two_subzones_from_same(self):
exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',
'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],
'SE-SE2': ['SE-SE1']})
def test_generate_zone_neighbours_GB(self):
exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},
'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}
zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],
'GB-NIR': ['GB'], 'GB-ORK': ['GB']})
def test_generate_zone_neighbours_no_exchange_parser(self):
exchanges = {'DE->FR': {'parsers': {}}}
zones = {'DE': {}, 'FR': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {})
def test_ZONE_NEIGHBOURS(self):
zone_neighbours = config.generate_zone_neighbours(config.
ZONES_CONFIG, config.EXCHANGES_CONFIG)
self.assertIn('DK-DK1', zone_neighbours.keys())
dk_neighbours = zone_neighbours['DK-DK1']
self.assertGreater(len(dk_neighbours), 1,
'expected a few neighbours for DK-DK1')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ConfigTestcase(unittest.TestCase):
def test_generate_zone_neighbours_two_countries(self):
exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}
zones = {'DE': {}, 'FR': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})
def test_generate_zone_neighbours_one_country_one_subzone(self):
exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}
zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':
['DE']})
def test_generate_zone_neighbours_two_subzones(self):
exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},
'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',
'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},
'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',
'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},
'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],
'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':
['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})
def test_generate_zone_neighbours_two_subzones_from_same(self):
exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',
'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],
'SE-SE2': ['SE-SE1']})
def test_generate_zone_neighbours_GB(self):
exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},
'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}
zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],
'GB-NIR': ['GB'], 'GB-ORK': ['GB']})
def test_generate_zone_neighbours_no_exchange_parser(self):
exchanges = {'DE->FR': {'parsers': {}}}
zones = {'DE': {}, 'FR': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {})
def test_ZONE_NEIGHBOURS(self):
zone_neighbours = config.generate_zone_neighbours(config.
ZONES_CONFIG, config.EXCHANGES_CONFIG)
self.assertIn('DK-DK1', zone_neighbours.keys())
dk_neighbours = zone_neighbours['DK-DK1']
self.assertGreater(len(dk_neighbours), 1,
'expected a few neighbours for DK-DK1')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CONFIG_DIR = Path(__file__).parent.parent.joinpath('config').resolve()
class ConfigTestcase(unittest.TestCase):
def test_generate_zone_neighbours_two_countries(self):
exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}
zones = {'DE': {}, 'FR': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})
def test_generate_zone_neighbours_one_country_one_subzone(self):
exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}
zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':
['DE']})
def test_generate_zone_neighbours_two_subzones(self):
exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},
'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',
'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},
'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',
'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},
'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],
'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':
['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})
def test_generate_zone_neighbours_two_subzones_from_same(self):
exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',
'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],
'SE-SE2': ['SE-SE1']})
def test_generate_zone_neighbours_GB(self):
exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},
'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}
zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],
'GB-NIR': ['GB'], 'GB-ORK': ['GB']})
def test_generate_zone_neighbours_no_exchange_parser(self):
exchanges = {'DE->FR': {'parsers': {}}}
zones = {'DE': {}, 'FR': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {})
def test_ZONE_NEIGHBOURS(self):
zone_neighbours = config.generate_zone_neighbours(config.
ZONES_CONFIG, config.EXCHANGES_CONFIG)
self.assertIn('DK-DK1', zone_neighbours.keys())
dk_neighbours = zone_neighbours['DK-DK1']
self.assertGreater(len(dk_neighbours), 1,
'expected a few neighbours for DK-DK1')
if __name__ == '__main__':
unittest.main(buffer=True)
<|reserved_special_token_1|>
import json
import unittest
from pathlib import Path
from deepdiff import DeepDiff
from electricitymap.contrib import config
CONFIG_DIR = Path(__file__).parent.parent.joinpath('config').resolve()
class ConfigTestcase(unittest.TestCase):
def test_generate_zone_neighbours_two_countries(self):
exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}
zones = {'DE': {}, 'FR': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})
def test_generate_zone_neighbours_one_country_one_subzone(self):
exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}
zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':
['DE']})
def test_generate_zone_neighbours_two_subzones(self):
exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},
'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},
'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',
'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},
'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',
'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},
'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],
'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':
['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})
def test_generate_zone_neighbours_two_subzones_from_same(self):
exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}
zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',
'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],
'SE-SE2': ['SE-SE1']})
def test_generate_zone_neighbours_GB(self):
exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},
'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}
zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],
'GB-NIR': ['GB'], 'GB-ORK': ['GB']})
def test_generate_zone_neighbours_no_exchange_parser(self):
exchanges = {'DE->FR': {'parsers': {}}}
zones = {'DE': {}, 'FR': {}}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {})
def test_ZONE_NEIGHBOURS(self):
zone_neighbours = config.generate_zone_neighbours(config.
ZONES_CONFIG, config.EXCHANGES_CONFIG)
self.assertIn('DK-DK1', zone_neighbours.keys())
dk_neighbours = zone_neighbours['DK-DK1']
self.assertGreater(len(dk_neighbours), 1,
'expected a few neighbours for DK-DK1')
if __name__ == '__main__':
unittest.main(buffer=True)
<|reserved_special_token_1|>
import json
import unittest
from pathlib import Path
from deepdiff import DeepDiff
from electricitymap.contrib import config
CONFIG_DIR = Path(__file__).parent.parent.joinpath("config").resolve()
class ConfigTestcase(unittest.TestCase):
def test_generate_zone_neighbours_two_countries(self):
exchanges = {
"DE->FR": {"parsers": {"exchange": "source"}},
}
zones = {
"DE": {},
"FR": {},
}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {"DE": ["FR"], "FR": ["DE"]})
def test_generate_zone_neighbours_one_country_one_subzone(self):
exchanges = {
"DE->SE-SE4": {"parsers": {"exchange": "source"}},
}
zones = {
"DE": {},
"SE": {
"subZoneNames": ["SE-SE4"],
},
"SE-SE4": {},
}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {"DE": ["SE-SE4"], "SE-SE4": ["DE"]})
def test_generate_zone_neighbours_two_subzones(self):
exchanges = {
"NO-NO1->SE-SE3": {"parsers": {"exchange": "source"}},
"NO-NO3->SE-SE2": {"parsers": {"exchange": "source"}},
"NO-NO4->SE-SE1": {"parsers": {"exchange": "source"}},
"NO-NO4->SE-SE2": {"parsers": {"exchange": "source"}},
}
zones = {
"NO": {
"subZoneNames": ["NO-NO1", "NO-NO2", "NO-NO3", "NO-NO4", "NO-NO5"],
},
"NO-NO1": {},
"NO-NO2": {},
"NO-NO3": {},
"NO-NO4": {},
"NO-NO5": {},
"SE": {
"subZoneNames": ["SE-SE1", "SE-SE2", "SE-SE3", "SE-SE4"],
},
"SE-SE1": {},
"SE-SE2": {},
"SE-SE3": {},
"SE-SE4": {},
}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(
zone_neighbours,
{
"NO-NO1": ["SE-SE3"],
"NO-NO3": ["SE-SE2"],
"NO-NO4": ["SE-SE1", "SE-SE2"],
"SE-SE1": ["NO-NO4"],
"SE-SE2": ["NO-NO3", "NO-NO4"],
"SE-SE3": ["NO-NO1"],
},
)
def test_generate_zone_neighbours_two_subzones_from_same(self):
exchanges = {
"SE-SE1->SE-SE2": {"parsers": {"exchange": "source"}},
}
zones = {
"SE": {
"subZoneNames": ["SE-SE1", "SE-SE2", "SE-SE3", "SE-SE4"],
},
"SE-SE1": {},
"SE-SE2": {},
"SE-SE3": {},
"SE-SE4": {},
}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(
zone_neighbours,
{"SE-SE1": ["SE-SE2"], "SE-SE2": ["SE-SE1"]},
)
def test_generate_zone_neighbours_GB(self):
# That's an interesting case as GB has islands, which are not subzones
# It means that GB->GB-NIR are valid exchanges and that
# GB and GB-NIR are neighbours
exchanges = {
"GB->GB-NIR": {"parsers": {"exchange": "source"}},
"GB->GB-ORK": {"parsers": {"exchange": "source"}},
}
zones = {
"GB": {},
"GB-NIR": {},
"GB-ORK": {},
}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(
zone_neighbours,
{"GB": ["GB-NIR", "GB-ORK"], "GB-NIR": ["GB"], "GB-ORK": ["GB"]},
)
def test_generate_zone_neighbours_no_exchange_parser(self):
exchanges = {
"DE->FR": {"parsers": {}},
}
zones = {
"DE": {},
"FR": {},
}
zone_neighbours = config.generate_zone_neighbours(zones, exchanges)
self.assertDictEqual(zone_neighbours, {})
def test_ZONE_NEIGHBOURS(self):
zone_neighbours = config.generate_zone_neighbours(
config.ZONES_CONFIG, config.EXCHANGES_CONFIG
)
self.assertIn("DK-DK1", zone_neighbours.keys())
dk_neighbours = zone_neighbours["DK-DK1"]
self.assertGreater(
len(dk_neighbours), 1, "expected a few neighbours for DK-DK1"
)
if __name__ == "__main__":
unittest.main(buffer=True)
|
flexible
|
{
"blob_id": "22b8ecfecc0e76d758f14dea865a426db56c6343",
"index": 3538,
"step-1": "<mask token>\n\n\nclass ConfigTestcase(unittest.TestCase):\n <mask token>\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': ['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConfigTestcase(unittest.TestCase):\n\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': ['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\n<mask token>\n",
"step-3": "<mask token>\nCONFIG_DIR = Path(__file__).parent.parent.joinpath('config').resolve()\n\n\nclass ConfigTestcase(unittest.TestCase):\n\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': ['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\nif __name__ == '__main__':\n unittest.main(buffer=True)\n",
"step-4": "import json\nimport unittest\nfrom pathlib import Path\nfrom deepdiff import DeepDiff\nfrom electricitymap.contrib import config\nCONFIG_DIR = Path(__file__).parent.parent.joinpath('config').resolve()\n\n\nclass ConfigTestcase(unittest.TestCase):\n\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {'DE->FR': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['FR'], 'FR': ['DE']})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {'DE->SE-SE4': {'parsers': {'exchange': 'source'}}}\n zones = {'DE': {}, 'SE': {'subZoneNames': ['SE-SE4']}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'DE': ['SE-SE4'], 'SE-SE4':\n ['DE']})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {'NO-NO1->SE-SE3': {'parsers': {'exchange': 'source'}},\n 'NO-NO3->SE-SE2': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE1': {'parsers': {'exchange': 'source'}},\n 'NO-NO4->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'NO': {'subZoneNames': ['NO-NO1', 'NO-NO2', 'NO-NO3',\n 'NO-NO4', 'NO-NO5']}, 'NO-NO1': {}, 'NO-NO2': {}, 'NO-NO3': {},\n 'NO-NO4': {}, 'NO-NO5': {}, 'SE': {'subZoneNames': ['SE-SE1',\n 'SE-SE2', 'SE-SE3', 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {},\n 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'NO-NO1': ['SE-SE3'],\n 'NO-NO3': ['SE-SE2'], 'NO-NO4': ['SE-SE1', 'SE-SE2'], 'SE-SE1':\n ['NO-NO4'], 'SE-SE2': ['NO-NO3', 'NO-NO4'], 'SE-SE3': ['NO-NO1']})\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {'SE-SE1->SE-SE2': {'parsers': {'exchange': 'source'}}}\n zones = {'SE': {'subZoneNames': ['SE-SE1', 'SE-SE2', 'SE-SE3',\n 'SE-SE4']}, 'SE-SE1': {}, 'SE-SE2': {}, 'SE-SE3': {}, 'SE-SE4': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'SE-SE1': ['SE-SE2'],\n 'SE-SE2': ['SE-SE1']})\n\n def test_generate_zone_neighbours_GB(self):\n exchanges = {'GB->GB-NIR': {'parsers': {'exchange': 'source'}},\n 'GB->GB-ORK': {'parsers': {'exchange': 'source'}}}\n zones = {'GB': {}, 'GB-NIR': {}, 'GB-ORK': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {'GB': ['GB-NIR', 'GB-ORK'],\n 'GB-NIR': ['GB'], 'GB-ORK': ['GB']})\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {'DE->FR': {'parsers': {}}}\n zones = {'DE': {}, 'FR': {}}\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(config.\n ZONES_CONFIG, config.EXCHANGES_CONFIG)\n self.assertIn('DK-DK1', zone_neighbours.keys())\n dk_neighbours = zone_neighbours['DK-DK1']\n self.assertGreater(len(dk_neighbours), 1,\n 'expected a few neighbours for DK-DK1')\n\n\nif __name__ == '__main__':\n unittest.main(buffer=True)\n",
"step-5": "import json\nimport unittest\nfrom pathlib import Path\n\nfrom deepdiff import DeepDiff\n\nfrom electricitymap.contrib import config\n\nCONFIG_DIR = Path(__file__).parent.parent.joinpath(\"config\").resolve()\n\n\nclass ConfigTestcase(unittest.TestCase):\n def test_generate_zone_neighbours_two_countries(self):\n exchanges = {\n \"DE->FR\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"DE\": {},\n \"FR\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {\"DE\": [\"FR\"], \"FR\": [\"DE\"]})\n\n def test_generate_zone_neighbours_one_country_one_subzone(self):\n exchanges = {\n \"DE->SE-SE4\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"DE\": {},\n \"SE\": {\n \"subZoneNames\": [\"SE-SE4\"],\n },\n \"SE-SE4\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {\"DE\": [\"SE-SE4\"], \"SE-SE4\": [\"DE\"]})\n\n def test_generate_zone_neighbours_two_subzones(self):\n exchanges = {\n \"NO-NO1->SE-SE3\": {\"parsers\": {\"exchange\": \"source\"}},\n \"NO-NO3->SE-SE2\": {\"parsers\": {\"exchange\": \"source\"}},\n \"NO-NO4->SE-SE1\": {\"parsers\": {\"exchange\": \"source\"}},\n \"NO-NO4->SE-SE2\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"NO\": {\n \"subZoneNames\": [\"NO-NO1\", \"NO-NO2\", \"NO-NO3\", \"NO-NO4\", \"NO-NO5\"],\n },\n \"NO-NO1\": {},\n \"NO-NO2\": {},\n \"NO-NO3\": {},\n \"NO-NO4\": {},\n \"NO-NO5\": {},\n \"SE\": {\n \"subZoneNames\": [\"SE-SE1\", \"SE-SE2\", \"SE-SE3\", \"SE-SE4\"],\n },\n \"SE-SE1\": {},\n \"SE-SE2\": {},\n \"SE-SE3\": {},\n \"SE-SE4\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(\n zone_neighbours,\n {\n \"NO-NO1\": [\"SE-SE3\"],\n \"NO-NO3\": [\"SE-SE2\"],\n \"NO-NO4\": [\"SE-SE1\", \"SE-SE2\"],\n \"SE-SE1\": [\"NO-NO4\"],\n \"SE-SE2\": [\"NO-NO3\", \"NO-NO4\"],\n \"SE-SE3\": [\"NO-NO1\"],\n },\n )\n\n def test_generate_zone_neighbours_two_subzones_from_same(self):\n exchanges = {\n \"SE-SE1->SE-SE2\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"SE\": {\n \"subZoneNames\": [\"SE-SE1\", \"SE-SE2\", \"SE-SE3\", \"SE-SE4\"],\n },\n \"SE-SE1\": {},\n \"SE-SE2\": {},\n \"SE-SE3\": {},\n \"SE-SE4\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(\n zone_neighbours,\n {\"SE-SE1\": [\"SE-SE2\"], \"SE-SE2\": [\"SE-SE1\"]},\n )\n\n def test_generate_zone_neighbours_GB(self):\n # That's an interesting case as GB has islands, which are not subzones\n # It means that GB->GB-NIR are valid exchanges and that\n # GB and GB-NIR are neighbours\n exchanges = {\n \"GB->GB-NIR\": {\"parsers\": {\"exchange\": \"source\"}},\n \"GB->GB-ORK\": {\"parsers\": {\"exchange\": \"source\"}},\n }\n zones = {\n \"GB\": {},\n \"GB-NIR\": {},\n \"GB-ORK\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(\n zone_neighbours,\n {\"GB\": [\"GB-NIR\", \"GB-ORK\"], \"GB-NIR\": [\"GB\"], \"GB-ORK\": [\"GB\"]},\n )\n\n def test_generate_zone_neighbours_no_exchange_parser(self):\n exchanges = {\n \"DE->FR\": {\"parsers\": {}},\n }\n zones = {\n \"DE\": {},\n \"FR\": {},\n }\n zone_neighbours = config.generate_zone_neighbours(zones, exchanges)\n self.assertDictEqual(zone_neighbours, {})\n\n def test_ZONE_NEIGHBOURS(self):\n zone_neighbours = config.generate_zone_neighbours(\n config.ZONES_CONFIG, config.EXCHANGES_CONFIG\n )\n 
self.assertIn(\"DK-DK1\", zone_neighbours.keys())\n dk_neighbours = zone_neighbours[\"DK-DK1\"]\n\n self.assertGreater(\n len(dk_neighbours), 1, \"expected a few neighbours for DK-DK1\"\n )\n\n\nif __name__ == \"__main__\":\n unittest.main(buffer=True)\n",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
"""
"""
import os
from alert_triage.util import filelock
MODIFIED_ALERTS_FILE = "/tmp/alert_triage_modified_alerts"
def read_modified_alert_ids():
""" Read modified alert IDs from file, then remove them from the file."""
# Return an empty list if the file doesn't exist.
if not os.path.exists(MODIFIED_ALERTS_FILE):
return []
# Get a lock on the file
lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)
lock.acquire()
# Open the file and read in the data.
fp = open(MODIFIED_ALERTS_FILE, "r+")
ids = fp.read().split("\n")
# remove zero length strings
ids = filter(len, ids)
# convert IDs to int
ids = list(map(int, ids))
# remove duplicates
ids = list(set(ids))
# close and remove the file
fp.close()
#TODO: uncomment when live
#os.unlink(MODIFIED_ALERTS_FILE)
# Release the lock.
lock.release()
return ids
def write_modified_alert_ids(ids):
# Get a lock on the file
lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)
lock.acquire()
# Open the file and write the alert IDs.
fp = open(MODIFIED_ALERTS_FILE, "a")
for alert_id in ids:
fp.write(str(alert_id) + "\n")
fp.close()
# Release the lock.
lock.release()
|
normal
|
{
"blob_id": "90ae14d8af163343520365a5565a7c44de57059d",
"index": 5662,
"step-1": "<mask token>\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\ndef write_modified_alert_ids(ids):\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'a')\n for alert_id in ids:\n fp.write(str(alert_id) + '\\n')\n fp.close()\n lock.release()\n",
"step-3": "<mask token>\nMODIFIED_ALERTS_FILE = '/tmp/alert_triage_modified_alerts'\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\ndef write_modified_alert_ids(ids):\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'a')\n for alert_id in ids:\n fp.write(str(alert_id) + '\\n')\n fp.close()\n lock.release()\n",
"step-4": "<mask token>\nimport os\nfrom alert_triage.util import filelock\nMODIFIED_ALERTS_FILE = '/tmp/alert_triage_modified_alerts'\n\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'r+')\n ids = fp.read().split('\\n')\n ids = filter(len, ids)\n ids = list(map(int, ids))\n ids = list(set(ids))\n fp.close()\n lock.release()\n return ids\n\n\ndef write_modified_alert_ids(ids):\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n fp = open(MODIFIED_ALERTS_FILE, 'a')\n for alert_id in ids:\n fp.write(str(alert_id) + '\\n')\n fp.close()\n lock.release()\n",
"step-5": "\"\"\"\n\"\"\"\n\nimport os\n\nfrom alert_triage.util import filelock\n\nMODIFIED_ALERTS_FILE = \"/tmp/alert_triage_modified_alerts\"\n\ndef read_modified_alert_ids():\n \"\"\" Read modified alert IDs from file, then remove them from the file.\"\"\"\n # Return an empty list if the file doesn't exist.\n if not os.path.exists(MODIFIED_ALERTS_FILE):\n return []\n # Get a lock on the file\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n # Open the file and read in the data.\n fp = open(MODIFIED_ALERTS_FILE, \"r+\")\n ids = fp.read().split(\"\\n\")\n # remove zero length strings\n ids = filter(len, ids)\n # convert IDs to int\n ids = list(map(int, ids))\n # remove duplicates\n ids = list(set(ids))\n # close and remove the file\n fp.close()\n #TODO: uncomment when live\n #os.unlink(MODIFIED_ALERTS_FILE)\n # Release the lock.\n lock.release()\n return ids\n\ndef write_modified_alert_ids(ids):\n # Get a lock on the file\n lock = filelock.FileLock(MODIFIED_ALERTS_FILE, 5)\n lock.acquire()\n # Open the file and write the alert IDs.\n fp = open(MODIFIED_ALERTS_FILE, \"a\")\n for alert_id in ids:\n fp.write(str(alert_id) + \"\\n\")\n fp.close()\n # Release the lock.\n lock.release()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
def say_hi(argument):
return f"Hello {argument}"
def call_func(some_func, argument):
return some_func(argument)
def main(argument):
"""docstring"""
return call_func(say_hi, argument)
if __name__ == "__main__":
print(main(1))
|
normal
|
{
"blob_id": "2a3c3112122dee5574a1569155287ea3e5f8c7b2",
"index": 6120,
"step-1": "<mask token>\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\n\n<mask token>\n",
"step-3": "def say_hi(argument):\n return f'Hello {argument}'\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\n\n<mask token>\n",
"step-4": "def say_hi(argument):\n return f'Hello {argument}'\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\n\nif __name__ == '__main__':\n print(main(1))\n",
"step-5": "def say_hi(argument):\n return f\"Hello {argument}\"\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\nif __name__ == \"__main__\":\n print(main(1))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1
while True:
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]
time.sleep(sampleTime)
print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bus = smbus.SMBus(1)
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1
while True:
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]
time.sleep(sampleTime)
print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)
<|reserved_special_token_1|>
import smbus
import time, datetime
bus = smbus.SMBus(1)
def getOzoneData():
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1
while True:
data = bus.read_i2c_block_data(80, 0, 2)
raw_adc = (data[0] & 15) * 256 + data[1]
ppm = 1.99 * raw_adc / 4096.0 + 0.01
timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]
time.sleep(sampleTime)
print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)
<|reserved_special_token_1|>
#!/usr/bin/python3
# Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# ADC121C_MQ131
# This code is designed to work with the ADC121C_I2CGAS_MQ131 I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Gas?sku=ADC121C_I2CGAS_MQ131#tabs-0-product_tabset-2
import smbus
import time, datetime
# Get I2C bus
bus = smbus.SMBus(1)
def getOzoneData():
data = bus.read_i2c_block_data(0x50, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
ppm = (1.99 * raw_adc) / 4096.0 + 0.01
return ppm
if __name__ == '__main__':
sampleTime = 1 # seconds
# ADC121C_MQ131 address, 0x50(80)
# Read data back from 0x00(00), 2 bytes
# raw_adc MSB, raw_adc LSB
while True:
data = bus.read_i2c_block_data(0x50, 0x00, 2)
# Convert the data to 12-bits
raw_adc = (data[0] & 0x0F) * 256 + data[1]
ppm = (1.99 * raw_adc) / 4096.0 + 0.01
timestmp = ((str(datetime.datetime.utcnow())).split(' ')[1]).split('.')[0]
time.sleep(sampleTime)
# Output data to screen
print(timestmp, "UTC", "Ozone Concentration : %.2f ppm" %ppm)
|
flexible
|
{
"blob_id": "678189ac5b0105c90178647843335f9d4402dc66",
"index": 1416,
"step-1": "<mask token>\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\nif __name__ == '__main__':\n sampleTime = 1\n while True:\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]\n time.sleep(sampleTime)\n print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)\n",
"step-3": "<mask token>\nbus = smbus.SMBus(1)\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\nif __name__ == '__main__':\n sampleTime = 1\n while True:\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]\n time.sleep(sampleTime)\n print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)\n",
"step-4": "import smbus\nimport time, datetime\nbus = smbus.SMBus(1)\n\n\ndef getOzoneData():\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n return ppm\n\n\nif __name__ == '__main__':\n sampleTime = 1\n while True:\n data = bus.read_i2c_block_data(80, 0, 2)\n raw_adc = (data[0] & 15) * 256 + data[1]\n ppm = 1.99 * raw_adc / 4096.0 + 0.01\n timestmp = str(datetime.datetime.utcnow()).split(' ')[1].split('.')[0]\n time.sleep(sampleTime)\n print(timestmp, 'UTC', 'Ozone Concentration : %.2f ppm' % ppm)\n",
"step-5": "#!/usr/bin/python3\n# Distributed with a free-will license.\n# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.\n# ADC121C_MQ131\n# This code is designed to work with the ADC121C_I2CGAS_MQ131 I2C Mini Module available from ControlEverything.com.\n# https://www.controleverything.com/content/Gas?sku=ADC121C_I2CGAS_MQ131#tabs-0-product_tabset-2\n\nimport smbus\nimport time, datetime\n\n# Get I2C bus\nbus = smbus.SMBus(1)\n\ndef getOzoneData():\n\tdata = bus.read_i2c_block_data(0x50, 0x00, 2)\n\n\t# Convert the data to 12-bits\n\traw_adc = (data[0] & 0x0F) * 256 + data[1]\n\tppm = (1.99 * raw_adc) / 4096.0 + 0.01\n\treturn ppm\n\nif __name__ == '__main__':\n\n\tsampleTime = 1 # seconds\n\n\t# ADC121C_MQ131 address, 0x50(80)\n\t# Read data back from 0x00(00), 2 bytes\n\t# raw_adc MSB, raw_adc LSB\n\twhile True:\n\t\tdata = bus.read_i2c_block_data(0x50, 0x00, 2)\n\n\t\t# Convert the data to 12-bits\n\t\traw_adc = (data[0] & 0x0F) * 256 + data[1]\n\t\tppm = (1.99 * raw_adc) / 4096.0 + 0.01\n\n\t\ttimestmp = ((str(datetime.datetime.utcnow())).split(' ')[1]).split('.')[0]\n\t\ttime.sleep(sampleTime)\n\n\n\t\t# Output data to screen\n\t\tprint(timestmp, \"UTC\", \"Ozone Concentration : %.2f ppm\" %ppm)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def deploy():
"""deploys the project to the server"""
with prefix('source /srv/django-envs/tweetset/bin/activate'):
with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):
with cd('/srv/django-projects/tweetset'):
puts(magenta('[Pulling changes]'))
run('git pull origin master')
puts(magenta('[Installing packages]'))
run('pip install -r requirements.txt')
with cd('/srv/django-projects/tweetset/tweetset'):
puts(magenta('[Migrating apps]'))
run('python manage.py migrate --no-initial-data')
puts(magenta('[Collecting static files]'))
run('python manage.py collectstatic --noinput')
puts(magenta('[Touching wsgi.py]'))
run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
env.hosts = ['[email protected]']
def deploy():
"""deploys the project to the server"""
with prefix('source /srv/django-envs/tweetset/bin/activate'):
with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):
with cd('/srv/django-projects/tweetset'):
puts(magenta('[Pulling changes]'))
run('git pull origin master')
puts(magenta('[Installing packages]'))
run('pip install -r requirements.txt')
with cd('/srv/django-projects/tweetset/tweetset'):
puts(magenta('[Migrating apps]'))
run('python manage.py migrate --no-initial-data')
puts(magenta('[Collecting static files]'))
run('python manage.py collectstatic --noinput')
puts(magenta('[Touching wsgi.py]'))
run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'
)
<|reserved_special_token_1|>
from __future__ import with_statement
from fabric.api import *
from fabric.colors import *
from fabric.utils import puts
from fabric.context_managers import shell_env
env.hosts = ['[email protected]']
def deploy():
"""deploys the project to the server"""
with prefix('source /srv/django-envs/tweetset/bin/activate'):
with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):
with cd('/srv/django-projects/tweetset'):
puts(magenta('[Pulling changes]'))
run('git pull origin master')
puts(magenta('[Installing packages]'))
run('pip install -r requirements.txt')
with cd('/srv/django-projects/tweetset/tweetset'):
puts(magenta('[Migrating apps]'))
run('python manage.py migrate --no-initial-data')
puts(magenta('[Collecting static files]'))
run('python manage.py collectstatic --noinput')
puts(magenta('[Touching wsgi.py]'))
run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'
)
<|reserved_special_token_1|>
from __future__ import with_statement
from fabric.api import *
from fabric.colors import *
from fabric.utils import puts
from fabric.context_managers import shell_env
env.hosts = ['[email protected]']
def deploy():
"deploys the project to the server"
with prefix('source /srv/django-envs/tweetset/bin/activate'):
with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):
with cd('/srv/django-projects/tweetset'):
puts(magenta("[Pulling changes]"))
run('git pull origin master')
puts(magenta("[Installing packages]"))
run('pip install -r requirements.txt')
with cd('/srv/django-projects/tweetset/tweetset'):
puts(magenta("[Migrating apps]"))
run('python manage.py migrate --no-initial-data')
puts(magenta("[Collecting static files]"))
run('python manage.py collectstatic --noinput')
puts(magenta("[Touching wsgi.py]"))
run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py')
|
flexible
|
{
"blob_id": "6111c9730c556ab3ab95f7685ffa135a2bbeb2ca",
"index": 5950,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef deploy():\n \"\"\"deploys the project to the server\"\"\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta('[Pulling changes]'))\n run('git pull origin master')\n puts(magenta('[Installing packages]'))\n run('pip install -r requirements.txt')\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta('[Migrating apps]'))\n run('python manage.py migrate --no-initial-data')\n puts(magenta('[Collecting static files]'))\n run('python manage.py collectstatic --noinput')\n puts(magenta('[Touching wsgi.py]'))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'\n )\n",
"step-3": "<mask token>\nenv.hosts = ['[email protected]']\n\n\ndef deploy():\n \"\"\"deploys the project to the server\"\"\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta('[Pulling changes]'))\n run('git pull origin master')\n puts(magenta('[Installing packages]'))\n run('pip install -r requirements.txt')\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta('[Migrating apps]'))\n run('python manage.py migrate --no-initial-data')\n puts(magenta('[Collecting static files]'))\n run('python manage.py collectstatic --noinput')\n puts(magenta('[Touching wsgi.py]'))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'\n )\n",
"step-4": "from __future__ import with_statement\nfrom fabric.api import *\nfrom fabric.colors import *\nfrom fabric.utils import puts\nfrom fabric.context_managers import shell_env\nenv.hosts = ['[email protected]']\n\n\ndef deploy():\n \"\"\"deploys the project to the server\"\"\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta('[Pulling changes]'))\n run('git pull origin master')\n puts(magenta('[Installing packages]'))\n run('pip install -r requirements.txt')\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta('[Migrating apps]'))\n run('python manage.py migrate --no-initial-data')\n puts(magenta('[Collecting static files]'))\n run('python manage.py collectstatic --noinput')\n puts(magenta('[Touching wsgi.py]'))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py'\n )\n",
"step-5": "from __future__ import with_statement\nfrom fabric.api import *\nfrom fabric.colors import *\nfrom fabric.utils import puts\nfrom fabric.context_managers import shell_env\n\nenv.hosts = ['[email protected]']\n\ndef deploy():\n \"deploys the project to the server\"\n with prefix('source /srv/django-envs/tweetset/bin/activate'):\n with shell_env(DJANGO_SETTINGS_MODULE='tweetset.settings.production'):\n with cd('/srv/django-projects/tweetset'):\n puts(magenta(\"[Pulling changes]\"))\n run('git pull origin master')\n\n puts(magenta(\"[Installing packages]\"))\n run('pip install -r requirements.txt')\n\n with cd('/srv/django-projects/tweetset/tweetset'):\n puts(magenta(\"[Migrating apps]\"))\n run('python manage.py migrate --no-initial-data')\n\n puts(magenta(\"[Collecting static files]\"))\n run('python manage.py collectstatic --noinput')\n\n puts(magenta(\"[Touching wsgi.py]\"))\n run('touch /srv/django-projects/tweetset/tweetset/tweetset/wsgi.py')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
__author__ = 'GazouillisTeam'
import numpy as np
import os
import sys
import time
from keras.callbacks import Callback
def save_architecture(model, path_out):
"""
Based on the keras utils 'model.summary()'
"""
# Redirect the print output the a textfile
orig_stdout = sys.stdout
# and store the architecture
f = file(os.path.join(path_out, "architecture.txt"), 'w')
sys.stdout = f
model.summary()
# Reset the print output direction
sys.stdout = orig_stdout
f.close()
open(os.path.join(path_out, "config.json"), 'w').write(model.to_json())
def create_log(path, settings, filename="log.txt"):
f = open(os.path.join(path, filename), "w")
f.writelines(str(settings))
f.writelines("\n####\nStarted on %s at %s\n" % (time.strftime("%d/%m/%Y"), time.strftime("%H:%M:%S")))
f.close()
def write_log(path, string, filename="log.txt"):
"""
Add a line at the end of a textfile.
:param path: textfile location
:param string: line to add
"""
# Open and Read
f = open(os.path.join(path, filename), "r")
lines = f.readlines()
f.close()
# Adding a line
lines.append(string)
# Write
f = open(os.path.join(path, filename), "w")
f.writelines(lines)
f.close()
class ModelSaver(Callback):
"""
Keras callback subclass which defines a saving procedure of the model being trained : after each epoch,
the last model is saved under the name 'after_random.cnn'. The best model is saved with the name 'best_model.cnn'.
The model after random can also be saved. And the model architecture is saved with the name 'config.network'.
Everything is stored using pickle.
"""
def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):
super(Callback, self).__init__()
self.verbose = verbose
self.path = path
self.path_weights = path_weights
self.monitor = monitor
self.best = np.Inf
self.h5py = h5py
def save_weights(self, path):
if not self.h5py: # H5PY not available : save weights using np.save
w = self.model.get_weights()
np.save(path+".npy", w)
else:
self.model.save_weights(path + ".h5py", overwrite=True)
def on_epoch_begin(self, epoch, logs={}):
self.epoch_start = time.time()
# Saving weights just after initialization
if epoch == 0:
save_path = os.path.join(self.path_weights, "after_initialization")
self.save_weights(save_path)
def on_epoch_end(self, epoch, logs={}):
self.epoch_end = time.time()
# get loss
monitor = logs.get(self.monitor)
# condition = True if loss decreased
condition = monitor < self.best
if condition:
# Save weights as "best_model.weights"
self.best = monitor
save_path = os.path.join(self.path_weights, "best_model")
self.save_weights(save_path)
else:
# Save weights as "last_epoch.weights"
save_path = os.path.join(self.path_weights, "last_epoch")
self.save_weights(save_path)
# Log file management
if self.verbose > 0:
log_string = "####\nEpoch %d took %d s: " % (epoch, int(self.epoch_end-self.epoch_start))
for k in logs.keys():
log_string += "%s : %.4f # " % (k, logs.get(k))
if condition:
log_string += "\tBEST"
write_log(self.path, log_string)
def trainargs2strings(path, model, dataset, index_train, index_valid, D, batch_size,
nsamples_per_epoch, nepoch, patience, lr):
settings = ""
settings += "Path : %s"%path
settings += "\nDataset shape :" + str(dataset.shape)
settings += "\nNtrain : %d"%len(index_train)
settings += "\nNvalid : %d"%len(index_valid)
settings += "\nDim : %d"%D
settings += "\nBatch size : %d"%batch_size
settings += "\nNb samples per epoch : %d"%nsamples_per_epoch
settings += "\nNb epochs : %d"%nepoch
settings += "\nPatience : %d"%patience
settings += "\nLR : %.5f"%lr
return settings
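

# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of how these helpers are typically wired together, assuming
# an old-style Keras Sequential model. The layer sizes, paths and
# hyper-parameters below are placeholders, not values taken from this project.
def _example_training_run():
    from keras.models import Sequential
    from keras.layers import Dense

    path = "experiments/run01"
    path_weights = os.path.join(path, "weights")
    if not os.path.exists(path_weights):
        os.makedirs(path_weights)

    model = Sequential([Dense(16, activation="relu", input_dim=8),
                        Dense(1, activation="sigmoid")])
    model.compile(optimizer="adam", loss="binary_crossentropy")

    save_architecture(model, path)             # architecture.txt + config.json
    create_log(path, settings="example run")   # ModelSaver appends to this log

    saver = ModelSaver(path, path_weights, monitor="val_loss", h5py=True)
    x = np.random.rand(128, 8)
    y = np.random.randint(0, 2, size=(128, 1))
    model.fit(x, y, validation_split=0.2, epochs=3, callbacks=[saver])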
|
normal
|
{
"blob_id": "f8635c815b375dc77e971d4ea0f86547215ab2f9",
"index": 7987,
"step-1": "<mask token>\n\n\nclass ModelSaver(Callback):\n \"\"\"\n Keras callback subclass which defines a saving procedure of the model being trained : after each epoch,\n the last model is saved under the name 'after_random.cnn'. The best model is saved with the name 'best_model.cnn'.\n The model after random can also be saved. And the model architecture is saved with the name 'config.network'.\n Everything is stored using pickle.\n \"\"\"\n\n def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):\n super(Callback, self).__init__()\n self.verbose = verbose\n self.path = path\n self.path_weights = path_weights\n self.monitor = monitor\n self.best = np.Inf\n self.h5py = h5py\n\n def save_weights(self, path):\n if not self.h5py:\n w = self.model.get_weights()\n np.save(path + '.npy', w)\n else:\n self.model.save_weights(path + '.h5py', overwrite=True)\n\n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_start = time.time()\n if epoch == 0:\n save_path = os.path.join(self.path_weights, 'after_initialization')\n self.save_weights(save_path)\n\n def on_epoch_end(self, epoch, logs={}):\n self.epoch_end = time.time()\n monitor = logs.get(self.monitor)\n condition = monitor < self.best\n if condition:\n self.best = monitor\n save_path = os.path.join(self.path_weights, 'best_model')\n self.save_weights(save_path)\n else:\n save_path = os.path.join(self.path_weights, 'last_epoch')\n self.save_weights(save_path)\n if self.verbose > 0:\n log_string = '####\\nEpoch %d took %d s: ' % (epoch, int(self.\n epoch_end - self.epoch_start))\n for k in logs.keys():\n log_string += '%s : %.4f # ' % (k, logs.get(k))\n if condition:\n log_string += '\\tBEST'\n write_log(self.path, log_string)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef write_log(path, string, filename='log.txt'):\n \"\"\"\n Add a line at the end of a textfile.\n\n :param path: textfile location\n :param string: line to add\n \"\"\"\n f = open(os.path.join(path, filename), 'r')\n lines = f.readlines()\n f.close()\n lines.append(string)\n f = open(os.path.join(path, filename), 'w')\n f.writelines(lines)\n f.close()\n\n\nclass ModelSaver(Callback):\n \"\"\"\n Keras callback subclass which defines a saving procedure of the model being trained : after each epoch,\n the last model is saved under the name 'after_random.cnn'. The best model is saved with the name 'best_model.cnn'.\n The model after random can also be saved. And the model architecture is saved with the name 'config.network'.\n Everything is stored using pickle.\n \"\"\"\n\n def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):\n super(Callback, self).__init__()\n self.verbose = verbose\n self.path = path\n self.path_weights = path_weights\n self.monitor = monitor\n self.best = np.Inf\n self.h5py = h5py\n\n def save_weights(self, path):\n if not self.h5py:\n w = self.model.get_weights()\n np.save(path + '.npy', w)\n else:\n self.model.save_weights(path + '.h5py', overwrite=True)\n\n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_start = time.time()\n if epoch == 0:\n save_path = os.path.join(self.path_weights, 'after_initialization')\n self.save_weights(save_path)\n\n def on_epoch_end(self, epoch, logs={}):\n self.epoch_end = time.time()\n monitor = logs.get(self.monitor)\n condition = monitor < self.best\n if condition:\n self.best = monitor\n save_path = os.path.join(self.path_weights, 'best_model')\n self.save_weights(save_path)\n else:\n save_path = os.path.join(self.path_weights, 'last_epoch')\n self.save_weights(save_path)\n if self.verbose > 0:\n log_string = '####\\nEpoch %d took %d s: ' % (epoch, int(self.\n epoch_end - self.epoch_start))\n for k in logs.keys():\n log_string += '%s : %.4f # ' % (k, logs.get(k))\n if condition:\n log_string += '\\tBEST'\n write_log(self.path, log_string)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef write_log(path, string, filename='log.txt'):\n \"\"\"\n Add a line at the end of a textfile.\n\n :param path: textfile location\n :param string: line to add\n \"\"\"\n f = open(os.path.join(path, filename), 'r')\n lines = f.readlines()\n f.close()\n lines.append(string)\n f = open(os.path.join(path, filename), 'w')\n f.writelines(lines)\n f.close()\n\n\nclass ModelSaver(Callback):\n \"\"\"\n Keras callback subclass which defines a saving procedure of the model being trained : after each epoch,\n the last model is saved under the name 'after_random.cnn'. The best model is saved with the name 'best_model.cnn'.\n The model after random can also be saved. And the model architecture is saved with the name 'config.network'.\n Everything is stored using pickle.\n \"\"\"\n\n def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):\n super(Callback, self).__init__()\n self.verbose = verbose\n self.path = path\n self.path_weights = path_weights\n self.monitor = monitor\n self.best = np.Inf\n self.h5py = h5py\n\n def save_weights(self, path):\n if not self.h5py:\n w = self.model.get_weights()\n np.save(path + '.npy', w)\n else:\n self.model.save_weights(path + '.h5py', overwrite=True)\n\n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_start = time.time()\n if epoch == 0:\n save_path = os.path.join(self.path_weights, 'after_initialization')\n self.save_weights(save_path)\n\n def on_epoch_end(self, epoch, logs={}):\n self.epoch_end = time.time()\n monitor = logs.get(self.monitor)\n condition = monitor < self.best\n if condition:\n self.best = monitor\n save_path = os.path.join(self.path_weights, 'best_model')\n self.save_weights(save_path)\n else:\n save_path = os.path.join(self.path_weights, 'last_epoch')\n self.save_weights(save_path)\n if self.verbose > 0:\n log_string = '####\\nEpoch %d took %d s: ' % (epoch, int(self.\n epoch_end - self.epoch_start))\n for k in logs.keys():\n log_string += '%s : %.4f # ' % (k, logs.get(k))\n if condition:\n log_string += '\\tBEST'\n write_log(self.path, log_string)\n\n\ndef trainargs2strings(path, model, dataset, index_train, index_valid, D,\n batch_size, nsamples_per_epoch, nepoch, patience, lr):\n settings = ''\n settings += 'Path : %s' % path\n settings += '\\nDataset shape :' + str(dataset.shape)\n settings += '\\nNtrain : %d' % len(index_train)\n settings += '\\nNvalid : %d' % len(index_valid)\n settings += '\\nDim : %d' % D\n settings += '\\nBatch size : %d' % batch_size\n settings += '\\nNb samples per epoch : %d' % nsamples_per_epoch\n settings += '\\nNb epochs : %d' % nepoch\n settings += '\\nPatience : %d' % patience\n settings += '\\nLR : %.5f' % lr\n return settings\n",
"step-4": "<mask token>\n\n\ndef save_architecture(model, path_out):\n \"\"\"\n Based on the keras utils 'model.summary()'\n \"\"\"\n orig_stdout = sys.stdout\n f = file(os.path.join(path_out, 'architecture.txt'), 'w')\n sys.stdout = f\n model.summary()\n sys.stdout = orig_stdout\n f.close()\n open(os.path.join(path_out, 'config.json'), 'w').write(model.to_json())\n\n\ndef create_log(path, settings, filename='log.txt'):\n f = open(os.path.join(path, filename), 'w')\n f.writelines(str(settings))\n f.writelines('\\n####\\nStarted on %s at %s\\n' % (time.strftime(\n '%d/%m/%Y'), time.strftime('%H:%M:%S')))\n f.close()\n\n\ndef write_log(path, string, filename='log.txt'):\n \"\"\"\n Add a line at the end of a textfile.\n\n :param path: textfile location\n :param string: line to add\n \"\"\"\n f = open(os.path.join(path, filename), 'r')\n lines = f.readlines()\n f.close()\n lines.append(string)\n f = open(os.path.join(path, filename), 'w')\n f.writelines(lines)\n f.close()\n\n\nclass ModelSaver(Callback):\n \"\"\"\n Keras callback subclass which defines a saving procedure of the model being trained : after each epoch,\n the last model is saved under the name 'after_random.cnn'. The best model is saved with the name 'best_model.cnn'.\n The model after random can also be saved. And the model architecture is saved with the name 'config.network'.\n Everything is stored using pickle.\n \"\"\"\n\n def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):\n super(Callback, self).__init__()\n self.verbose = verbose\n self.path = path\n self.path_weights = path_weights\n self.monitor = monitor\n self.best = np.Inf\n self.h5py = h5py\n\n def save_weights(self, path):\n if not self.h5py:\n w = self.model.get_weights()\n np.save(path + '.npy', w)\n else:\n self.model.save_weights(path + '.h5py', overwrite=True)\n\n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_start = time.time()\n if epoch == 0:\n save_path = os.path.join(self.path_weights, 'after_initialization')\n self.save_weights(save_path)\n\n def on_epoch_end(self, epoch, logs={}):\n self.epoch_end = time.time()\n monitor = logs.get(self.monitor)\n condition = monitor < self.best\n if condition:\n self.best = monitor\n save_path = os.path.join(self.path_weights, 'best_model')\n self.save_weights(save_path)\n else:\n save_path = os.path.join(self.path_weights, 'last_epoch')\n self.save_weights(save_path)\n if self.verbose > 0:\n log_string = '####\\nEpoch %d took %d s: ' % (epoch, int(self.\n epoch_end - self.epoch_start))\n for k in logs.keys():\n log_string += '%s : %.4f # ' % (k, logs.get(k))\n if condition:\n log_string += '\\tBEST'\n write_log(self.path, log_string)\n\n\ndef trainargs2strings(path, model, dataset, index_train, index_valid, D,\n batch_size, nsamples_per_epoch, nepoch, patience, lr):\n settings = ''\n settings += 'Path : %s' % path\n settings += '\\nDataset shape :' + str(dataset.shape)\n settings += '\\nNtrain : %d' % len(index_train)\n settings += '\\nNvalid : %d' % len(index_valid)\n settings += '\\nDim : %d' % D\n settings += '\\nBatch size : %d' % batch_size\n settings += '\\nNb samples per epoch : %d' % nsamples_per_epoch\n settings += '\\nNb epochs : %d' % nepoch\n settings += '\\nPatience : %d' % patience\n settings += '\\nLR : %.5f' % lr\n return settings\n",
"step-5": "__author__ = 'GazouillisTeam'\n\nimport numpy as np\nimport os\nimport sys\nimport time\n\nfrom keras.callbacks import Callback\n\ndef save_architecture(model, path_out):\n \"\"\"\n Based on the keras utils 'model.summary()'\n \"\"\"\n # Redirect the print output the a textfile\n orig_stdout = sys.stdout\n # and store the architecture\n f = file(os.path.join(path_out, \"architecture.txt\"), 'w')\n sys.stdout = f\n model.summary()\n # Reset the print output direction\n sys.stdout = orig_stdout\n f.close()\n\n open(os.path.join(path_out, \"config.json\"), 'w').write(model.to_json())\n\ndef create_log(path, settings, filename=\"log.txt\"):\n f = open(os.path.join(path, filename), \"w\")\n f.writelines(str(settings))\n f.writelines(\"\\n####\\nStarted on %s at %s\\n\" % (time.strftime(\"%d/%m/%Y\"), time.strftime(\"%H:%M:%S\")))\n f.close()\n\ndef write_log(path, string, filename=\"log.txt\"):\n \"\"\"\n Add a line at the end of a textfile.\n\n :param path: textfile location\n :param string: line to add\n \"\"\"\n # Open and Read\n f = open(os.path.join(path, filename), \"r\")\n lines = f.readlines()\n f.close()\n # Adding a line\n lines.append(string)\n # Write\n f = open(os.path.join(path, filename), \"w\")\n f.writelines(lines)\n f.close()\n\nclass ModelSaver(Callback):\n \"\"\"\n Keras callback subclass which defines a saving procedure of the model being trained : after each epoch,\n the last model is saved under the name 'after_random.cnn'. The best model is saved with the name 'best_model.cnn'.\n The model after random can also be saved. And the model architecture is saved with the name 'config.network'.\n Everything is stored using pickle.\n \"\"\"\n\n def __init__(self, path, path_weights, monitor, verbose=1, h5py=False):\n super(Callback, self).__init__()\n self.verbose = verbose\n self.path = path\n self.path_weights = path_weights\n self.monitor = monitor\n self.best = np.Inf\n self.h5py = h5py\n\n def save_weights(self, path):\n if not self.h5py: # H5PY not available : save weights using np.save\n w = self.model.get_weights()\n np.save(path+\".npy\", w)\n else:\n self.model.save_weights(path + \".h5py\", overwrite=True)\n\n def on_epoch_begin(self, epoch, logs={}):\n self.epoch_start = time.time()\n # Saving weights just after initialization\n if epoch == 0:\n save_path = os.path.join(self.path_weights, \"after_initialization\")\n self.save_weights(save_path)\n\n def on_epoch_end(self, epoch, logs={}):\n self.epoch_end = time.time()\n # get loss\n monitor = logs.get(self.monitor)\n # condition = True if loss decreased\n condition = monitor < self.best\n\n if condition:\n # Save weights as \"best_model.weights\"\n self.best = monitor\n save_path = os.path.join(self.path_weights, \"best_model\")\n self.save_weights(save_path)\n else:\n # Save weights as \"last_epoch.weights\"\n save_path = os.path.join(self.path_weights, \"last_epoch\")\n self.save_weights(save_path)\n\n # Log file management\n if self.verbose > 0:\n log_string = \"####\\nEpoch %d took %d s: \" % (epoch, int(self.epoch_end-self.epoch_start))\n for k in logs.keys():\n log_string += \"%s : %.4f # \" % (k, logs.get(k))\n if condition:\n log_string += \"\\tBEST\"\n write_log(self.path, log_string)\n\ndef trainargs2strings(path, model, dataset, index_train, index_valid, D, batch_size,\n nsamples_per_epoch, nepoch, patience, lr):\n settings = \"\"\n settings += \"Path : %s\"%path\n settings += \"\\nDataset shape :\" + str(dataset.shape)\n settings += \"\\nNtrain : %d\"%len(index_train)\n settings += \"\\nNvalid 
: %d\"%len(index_valid)\n settings += \"\\nDim : %d\"%D\n settings += \"\\nBatch size : %d\"%batch_size\n settings += \"\\nNb samples per epoch : %d\"%nsamples_per_epoch\n settings += \"\\nNb epochs : %d\"%nepoch\n settings += \"\\nPatience : %d\"%patience\n settings += \"\\nLR : %.5f\"%lr\n return settings",
"step-ids": [
6,
7,
8,
10,
13
]
}
|
[
6,
7,
8,
10,
13
] |
# Counts number of dumbbell curls in the video
import cv2
import mediapipe as mp
import base
import math
import numpy as np
class PoseEstimator(base.PoseDetector):
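    """Counts exercise repetitions (dumbbell curls in main) on top of the base
    PoseDetector by tracking the angle of a chosen joint triplet."""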
def __init__(self, mode=False, upperBody = False, smooth=True, detectConf=.5, trackConf=.5,
outFile="output.mp4", outWidth=720, outHeight=1280):
super().__init__(mode, upperBody, smooth, detectConf, trackConf, outFile, outWidth, outHeight)
self.count = 0
self.dir = 0
def findAngle(self, img, p1, p2, p3, draw=True):
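        """Returns the angle in degrees (0-360) at joint p2 formed by the
        landmarks p1-p2-p3; roughly 180 corresponds to a fully extended arm."""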
x1,y1 = self.lms[p1][1:]
x2,y2 = self.lms[p2][1:]
x3,y3 = self.lms[p3][1:]
angle = math.degrees(math.atan2(y3-y2,x3-x2) - math.atan2(y1-y2,x1-x2))
if angle<0:
angle += 360
if draw:
cv2.line(img, (x1,y1), (x2,y2), (255,255,255) ,2)
cv2.line(img, (x3,y3), (x2,y2), (255,255,255) ,2)
cv2.circle(img, (x1,y1), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x1,y1), 12, (0,0,255), 2)
cv2.circle(img, (x2,y2), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x2,y2), 12, (0,0,255), 2)
cv2.circle(img, (x3,y3), 8, (0,0,255), cv2.FILLED)
cv2.circle(img, (x3,y3), 12, (0,0,255), 2)
cv2.putText(img, str(int(angle)), (x2-40,y2+50), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 2)
return angle
def countReps(self, img, p1, p2, p3):
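        """Updates the repetition counter from the p1-p2-p3 joint angle and
        draws the current count and a progress bar onto img."""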
angle = self.findAngle(img, p1, p2, p3)
perc = np.interp(angle, (210,320), (0,100))
color = (0,255,0)
if perc > 95:
color = (0,0,255)
if self.dir == 0:
self.count += .5
self.dir = 1
if perc == 0:
color = (255,0,0)
if self.dir == 1:
self.count += .5
self.dir = 0
cv2.putText(img, f'{int(self.count)}', (30,120), cv2.FONT_HERSHEY_PLAIN, 9, (255,0,0), 4)
bar = np.interp(perc, (0,100), (800,200))
cv2.rectangle(img, (50,200), (100,800), color, 3)
cv2.rectangle(img, (50,int(bar)), (100,800), color, cv2.FILLED)
cv2.putText(img, f'{int(perc)}%', (30,870), cv2.FONT_HERSHEY_PLAIN, 4, (255,0,0), 4)
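

# Illustrative helper (not part of the original script): countReps above maps
# the joint angle onto a 0-100% "curl progress" scale with np.interp and adds
# half a rep each time the arm reaches either end of that range. The (210, 320)
# angle window mirrors the values hard-coded in countReps and is an assumption
# tied to the camera/body orientation of the demo video.
def _demo_curl_progress():
    for angle in (210, 265, 320):
        perc = np.interp(angle, (210, 320), (0, 100))
        print(f"elbow angle {angle} deg -> {perc:.0f}% of a curl")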
def main():
cap = cv2.VideoCapture("media/1.mp4")
estimator = PoseEstimator()
while True:
_, img = cap.read()
img = cv2.resize(img, (720, 1280))
img = estimator.findPose(img)
lms = estimator.findPosition(img, draw=False)
if len(lms)>28:
estimator.countReps(img,11,13,15)
# estimator.writeFrame(img)
cv2.imshow("Correct Pose Estimation", img)
if cv2.waitKey(1) & 0xFF == ord('q'):
cap.release()
cv2.destroyAllWindows()
break
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "4a886437727ed6b48206e12b686a59a1d2a1c489",
"index": 4948,
"step-1": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n <mask token>\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import cv2\nimport mediapipe as mp\nimport base\nimport math\nimport numpy as np\n\n\nclass PoseEstimator(base.PoseDetector):\n\n def __init__(self, mode=False, upperBody=False, smooth=True, detectConf\n =0.5, trackConf=0.5, outFile='output.mp4', outWidth=720, outHeight=1280\n ):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf,\n outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True):\n x1, y1 = self.lms[p1][1:]\n x2, y2 = self.lms[p2][1:]\n x3, y3 = self.lms[p3][1:]\n angle = math.degrees(math.atan2(y3 - y2, x3 - x2) - math.atan2(y1 -\n y2, x1 - x2))\n if angle < 0:\n angle += 360\n if draw:\n cv2.line(img, (x1, y1), (x2, y2), (255, 255, 255), 2)\n cv2.line(img, (x3, y3), (x2, y2), (255, 255, 255), 2)\n cv2.circle(img, (x1, y1), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x1, y1), 12, (0, 0, 255), 2)\n cv2.circle(img, (x2, y2), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x2, y2), 12, (0, 0, 255), 2)\n cv2.circle(img, (x3, y3), 8, (0, 0, 255), cv2.FILLED)\n cv2.circle(img, (x3, y3), 12, (0, 0, 255), 2)\n cv2.putText(img, str(int(angle)), (x2 - 40, y2 + 50), cv2.\n FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)\n return angle\n\n def countReps(self, img, p1, p2, p3):\n angle = self.findAngle(img, p1, p2, p3)\n perc = np.interp(angle, (210, 320), (0, 100))\n color = 0, 255, 0\n if perc > 95:\n color = 0, 0, 255\n if self.dir == 0:\n self.count += 0.5\n self.dir = 1\n if perc == 0:\n color = 255, 0, 0\n if self.dir == 1:\n self.count += 0.5\n self.dir = 0\n cv2.putText(img, f'{int(self.count)}', (30, 120), cv2.\n FONT_HERSHEY_PLAIN, 9, (255, 0, 0), 4)\n bar = np.interp(perc, (0, 100), (800, 200))\n cv2.rectangle(img, (50, 200), (100, 800), color, 3)\n cv2.rectangle(img, (50, int(bar)), (100, 800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30, 870), cv2.FONT_HERSHEY_PLAIN,\n 4, (255, 0, 0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture('media/1.mp4')\n estimator = PoseEstimator()\n while True:\n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n img = estimator.findPose(img)\n lms = estimator.findPosition(img, draw=False)\n if len(lms) > 28:\n estimator.countReps(img, 11, 13, 15)\n cv2.imshow('Correct Pose Estimation', img)\n if cv2.waitKey(1) & 255 == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Counts number of dumbbell curls in the video \n\nimport cv2 \nimport mediapipe as mp \nimport base\nimport math\nimport numpy as np\n\nclass PoseEstimator(base.PoseDetector): \n def __init__(self, mode=False, upperBody = False, smooth=True, detectConf=.5, trackConf=.5, \n outFile=\"output.mp4\", outWidth=720, outHeight=1280):\n super().__init__(mode, upperBody, smooth, detectConf, trackConf, outFile, outWidth, outHeight)\n self.count = 0\n self.dir = 0\n\n def findAngle(self, img, p1, p2, p3, draw=True): \n x1,y1 = self.lms[p1][1:]\n x2,y2 = self.lms[p2][1:]\n x3,y3 = self.lms[p3][1:]\n\n angle = math.degrees(math.atan2(y3-y2,x3-x2) - math.atan2(y1-y2,x1-x2))\n if angle<0: \n angle += 360\n\n if draw: \n cv2.line(img, (x1,y1), (x2,y2), (255,255,255) ,2)\n cv2.line(img, (x3,y3), (x2,y2), (255,255,255) ,2)\n cv2.circle(img, (x1,y1), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x1,y1), 12, (0,0,255), 2)\n cv2.circle(img, (x2,y2), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x2,y2), 12, (0,0,255), 2)\n cv2.circle(img, (x3,y3), 8, (0,0,255), cv2.FILLED)\n cv2.circle(img, (x3,y3), 12, (0,0,255), 2)\n cv2.putText(img, str(int(angle)), (x2-40,y2+50), cv2.FONT_HERSHEY_PLAIN, 2, (255,0,255), 2)\n\n return angle \n \n def countReps(self, img, p1, p2, p3): \n angle = self.findAngle(img, p1, p2, p3) \n perc = np.interp(angle, (210,320), (0,100))\n \n color = (0,255,0)\n if perc > 95: \n color = (0,0,255)\n if self.dir == 0: \n self.count += .5 \n self.dir = 1\n if perc == 0: \n color = (255,0,0)\n if self.dir == 1: \n self.count += .5\n self.dir = 0 \n \n cv2.putText(img, f'{int(self.count)}', (30,120), cv2.FONT_HERSHEY_PLAIN, 9, (255,0,0), 4)\n\n bar = np.interp(perc, (0,100), (800,200))\n cv2.rectangle(img, (50,200), (100,800), color, 3)\n cv2.rectangle(img, (50,int(bar)), (100,800), color, cv2.FILLED)\n cv2.putText(img, f'{int(perc)}%', (30,870), cv2.FONT_HERSHEY_PLAIN, 4, (255,0,0), 4)\n\n\ndef main():\n cap = cv2.VideoCapture(\"media/1.mp4\") \n estimator = PoseEstimator()\n\n while True: \n _, img = cap.read()\n img = cv2.resize(img, (720, 1280))\n\n img = estimator.findPose(img) \n lms = estimator.findPosition(img, draw=False) \n if len(lms)>28: \n estimator.countReps(img,11,13,15)\n\n # estimator.writeFrame(img)\n\n cv2.imshow(\"Correct Pose Estimation\", img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\nif __name__ == \"__main__\": \n main() ",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Page:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def background(self, color=None, image=None, position=None, size=None,
repeat=None, origin=None, clip=None, attachment=None):
"""Configures the page background.
        See <a href="https://www.w3schools.com/cssref/css3_pr_background.asp">
        for more information.
        Args:
            color (str): The background color to use.
            image (str): The background images to use.
            position (str): The position of the background images.
            size (str): The size of the background images.
            repeat (str): How to repeat the background images.
            origin (str): The positioning area of the background images.
            clip (str): The painting area of the background images.
            attachment (str): Whether the background images are fixed or
                scroll with the rest of the page.
Returns:
obj (self): The instance of this class.
"""
if color:
self.__vars['--body-background-color'] = color
if image:
self.__vars['--body-background-image'] = image
if position:
self.__vars['--body-background-position'] = position
if size:
self.__vars['background-size'] = size
if repeat:
self.__vars['--body-background-repeat'] = repeat
if origin:
self.__vars['--body-background-origin'] = origin
if clip:
self.__vars['--body-background-clip'] = clip
if attachment:
self.__vars['--body-background-attachment'] = attachment
return self
def __str__(self):
"""Renders an HTML page."""
links = [Link(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'
)]
if self.__favicon:
links.append(Link(self.__favicon, 'icon', 'image/x-icon'))
scripts = [Javascript(
'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'
), Javascript(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'
)]
if self.__resources:
for resource in self.__resources:
if isinstance(resource, Link):
links.append(resource)
elif isinstance(resource, Javascript):
scripts.append(resource)
else:
raise TypeError(
f'Page resource must be either <class "Link"> or <class "Javascript">, but got: {type(resource)};'
)
title = None
if self.__title:
if isinstance(self.__title, str):
title = f'<title>{self.__title}</title>'
else:
raise TypeError(
f'Page title must be <str>, but got: {type(title)};')
root_vars = ''
if len(self.__vars) > 0:
for name, value in self.__vars.items():
root_vars += '%s: %s;' % (name, value)
root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else
'10px')
root_vars = ':root{' + root_vars + '}'
inner_style = pkg_resources.resource_string(__name__, 'generic.css'
).decode('utf-8')
inner_style = root_vars + inner_style
inner_style = re.sub('\\n|\\s\\s+', ' ', inner_style)
inner_script = pkg_resources.resource_string(__name__, 'generic.js'
).decode('utf-8')
inner_script = re.sub('\\n|\\s\\s+', ' ', inner_script)
return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta name="viewport"
content="width=device-width, initial-scale=1,
shrink-to-fit=no"/>
{inject(*links)}
{inject(*scripts)}
{inject(title)}
</head>
<body>
{inject(self.__menu)}
<div class="container-fluid">
{inject(self.__container)}
</div>
</body>
<script>{inner_script}</script>
<style>{inner_style}</style>
</html>
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Page:
<|reserved_special_token_0|>
def __init__(self, favicon=None, resources=None, title=None, menu=None,
container=None):
super().__init__()
self.__favicon = favicon
self.__resources = resources
self.__title = title
self.__menu = menu
self.__container = container
self.__vars = {}
def __html__(self):
"""Renders an HTML page."""
return str(self)
def background(self, color=None, image=None, position=None, size=None,
repeat=None, origin=None, clip=None, attachment=None):
"""Configures the page background.
        See <a href="https://www.w3schools.com/cssref/css3_pr_background.asp">
        for more information.
        Args:
            color (str): The background color to use.
            image (str): The background images to use.
            position (str): The position of the background images.
            size (str): The size of the background images.
            repeat (str): How to repeat the background images.
            origin (str): The positioning area of the background images.
            clip (str): The painting area of the background images.
            attachment (str): Whether the background images are fixed or
                scroll with the rest of the page.
Returns:
obj (self): The instance of this class.
"""
if color:
self.__vars['--body-background-color'] = color
if image:
self.__vars['--body-background-image'] = image
if position:
self.__vars['--body-background-position'] = position
if size:
self.__vars['background-size'] = size
if repeat:
self.__vars['--body-background-repeat'] = repeat
if origin:
self.__vars['--body-background-origin'] = origin
if clip:
self.__vars['--body-background-clip'] = clip
if attachment:
self.__vars['--body-background-attachment'] = attachment
return self
def __str__(self):
"""Renders an HTML page."""
links = [Link(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'
)]
if self.__favicon:
links.append(Link(self.__favicon, 'icon', 'image/x-icon'))
scripts = [Javascript(
'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'
), Javascript(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'
)]
if self.__resources:
for resource in self.__resources:
if isinstance(resource, Link):
links.append(resource)
elif isinstance(resource, Javascript):
scripts.append(resource)
else:
raise TypeError(
f'Page resource must be either <class "Link"> or <class "Javascript">, but got: {type(resource)};'
)
title = None
if self.__title:
if isinstance(self.__title, str):
title = f'<title>{self.__title}</title>'
else:
raise TypeError(
f'Page title must be <str>, but got: {type(title)};')
root_vars = ''
if len(self.__vars) > 0:
for name, value in self.__vars.items():
root_vars += '%s: %s;' % (name, value)
root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else
'10px')
root_vars = ':root{' + root_vars + '}'
inner_style = pkg_resources.resource_string(__name__, 'generic.css'
).decode('utf-8')
inner_style = root_vars + inner_style
inner_style = re.sub('\\n|\\s\\s+', ' ', inner_style)
inner_script = pkg_resources.resource_string(__name__, 'generic.js'
).decode('utf-8')
inner_script = re.sub('\\n|\\s\\s+', ' ', inner_script)
return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta name="viewport"
content="width=device-width, initial-scale=1,
shrink-to-fit=no"/>
{inject(*links)}
{inject(*scripts)}
{inject(title)}
</head>
<body>
{inject(self.__menu)}
<div class="container-fluid">
{inject(self.__container)}
</div>
</body>
<script>{inner_script}</script>
<style>{inner_style}</style>
</html>
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Page:
"""A web-page presenting container.
Args:
        favicon (str): The file name of the favorite icon displayed in a
            browser tab (default=None).
        title (str): The page title, displayed in a browser tab (default=None).
        resources (list): The list of `Link` and `Javascript` components
            representing the page resources (default=None).
menu (Menu): The page top level menu (default=None).
container (WebComponent): The page container (default=None).
"""
def __init__(self, favicon=None, resources=None, title=None, menu=None,
container=None):
super().__init__()
self.__favicon = favicon
self.__resources = resources
self.__title = title
self.__menu = menu
self.__container = container
self.__vars = {}
def __html__(self):
"""Renders an HTML page."""
return str(self)
def background(self, color=None, image=None, position=None, size=None,
repeat=None, origin=None, clip=None, attachment=None):
"""Configures the page background.
        See <a href="https://www.w3schools.com/cssref/css3_pr_background.asp">
        for more information.
        Args:
            color (str): The background color to use.
            image (str): The background images to use.
            position (str): The position of the background images.
            size (str): The size of the background images.
            repeat (str): How to repeat the background images.
            origin (str): The positioning area of the background images.
            clip (str): The painting area of the background images.
            attachment (str): Whether the background images are fixed or
                scroll with the rest of the page.
Returns:
obj (self): The instance of this class.
"""
if color:
self.__vars['--body-background-color'] = color
if image:
self.__vars['--body-background-image'] = image
if position:
self.__vars['--body-background-position'] = position
if size:
self.__vars['background-size'] = size
if repeat:
self.__vars['--body-background-repeat'] = repeat
if origin:
self.__vars['--body-background-origin'] = origin
if clip:
self.__vars['--body-background-clip'] = clip
if attachment:
self.__vars['--body-background-attachment'] = attachment
return self
def __str__(self):
"""Renders an HTML page."""
links = [Link(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'
)]
if self.__favicon:
links.append(Link(self.__favicon, 'icon', 'image/x-icon'))
scripts = [Javascript(
'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'
), Javascript(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'
)]
if self.__resources:
for resource in self.__resources:
if isinstance(resource, Link):
links.append(resource)
elif isinstance(resource, Javascript):
scripts.append(resource)
else:
raise TypeError(
f'Page resource must be either <class "Link"> or <class "Javascript">, but got: {type(resource)};'
)
title = None
if self.__title:
if isinstance(self.__title, str):
title = f'<title>{self.__title}</title>'
else:
raise TypeError(
f'Page title must be <str>, but got: {type(title)};')
root_vars = ''
if len(self.__vars) > 0:
for name, value in self.__vars.items():
root_vars += '%s: %s;' % (name, value)
root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else
'10px')
root_vars = ':root{' + root_vars + '}'
inner_style = pkg_resources.resource_string(__name__, 'generic.css'
).decode('utf-8')
inner_style = root_vars + inner_style
inner_style = re.sub('\\n|\\s\\s+', ' ', inner_style)
inner_script = pkg_resources.resource_string(__name__, 'generic.js'
).decode('utf-8')
inner_script = re.sub('\\n|\\s\\s+', ' ', inner_script)
return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta name="viewport"
content="width=device-width, initial-scale=1,
shrink-to-fit=no"/>
{inject(*links)}
{inject(*scripts)}
{inject(title)}
</head>
<body>
{inject(self.__menu)}
<div class="container-fluid">
{inject(self.__container)}
</div>
</body>
<script>{inner_script}</script>
<style>{inner_style}</style>
</html>
"""
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import re
import pkg_resources
from .components import Link, Javascript, inject
class Page:
"""A web-page presenting container.
Args:
        favicon (str): The file name of the favorite icon displayed in a
            browser tab (default=None).
        title (str): The page title, displayed in a browser tab (default=None).
        resources (list): The list of `Link` and `Javascript` components
            representing the page resources (default=None).
menu (Menu): The page top level menu (default=None).
container (WebComponent): The page container (default=None).
"""
def __init__(self, favicon=None, resources=None, title=None, menu=None,
container=None):
super().__init__()
self.__favicon = favicon
self.__resources = resources
self.__title = title
self.__menu = menu
self.__container = container
self.__vars = {}
def __html__(self):
"""Renders an HTML page."""
return str(self)
def background(self, color=None, image=None, position=None, size=None,
repeat=None, origin=None, clip=None, attachment=None):
"""Configures the page background.
        See <a href="https://www.w3schools.com/cssref/css3_pr_background.asp">
        for more information.
        Args:
            color (str): The background color to use.
            image (str): The background images to use.
            position (str): The position of the background images.
            size (str): The size of the background images.
            repeat (str): How to repeat the background images.
            origin (str): The positioning area of the background images.
            clip (str): The painting area of the background images.
            attachment (str): Whether the background images are fixed or
                scroll with the rest of the page.
Returns:
obj (self): The instance of this class.
"""
if color:
self.__vars['--body-background-color'] = color
if image:
self.__vars['--body-background-image'] = image
if position:
self.__vars['--body-background-position'] = position
if size:
self.__vars['background-size'] = size
if repeat:
self.__vars['--body-background-repeat'] = repeat
if origin:
self.__vars['--body-background-origin'] = origin
if clip:
self.__vars['--body-background-clip'] = clip
if attachment:
self.__vars['--body-background-attachment'] = attachment
return self
def __str__(self):
"""Renders an HTML page."""
links = [Link(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'
), Link(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'
)]
if self.__favicon:
links.append(Link(self.__favicon, 'icon', 'image/x-icon'))
scripts = [Javascript(
'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'
), Javascript(
'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'
), Javascript(
'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'
)]
if self.__resources:
for resource in self.__resources:
if isinstance(resource, Link):
links.append(resource)
elif isinstance(resource, Javascript):
scripts.append(resource)
else:
raise TypeError(
f'Page resource must be either <class "Link"> or <class "Javascript">, but got: {type(resource)};'
)
title = None
if self.__title:
if isinstance(self.__title, str):
title = f'<title>{self.__title}</title>'
else:
raise TypeError(
f'Page title must be <str>, but got: {type(title)};')
root_vars = ''
if len(self.__vars) > 0:
for name, value in self.__vars.items():
root_vars += '%s: %s;' % (name, value)
root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else
'10px')
root_vars = ':root{' + root_vars + '}'
inner_style = pkg_resources.resource_string(__name__, 'generic.css'
).decode('utf-8')
inner_style = root_vars + inner_style
inner_style = re.sub('\\n|\\s\\s+', ' ', inner_style)
inner_script = pkg_resources.resource_string(__name__, 'generic.js'
).decode('utf-8')
inner_script = re.sub('\\n|\\s\\s+', ' ', inner_script)
return f"""
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta name="viewport"
content="width=device-width, initial-scale=1,
shrink-to-fit=no"/>
{inject(*links)}
{inject(*scripts)}
{inject(title)}
</head>
<body>
{inject(self.__menu)}
<div class="container-fluid">
{inject(self.__container)}
</div>
</body>
<script>{inner_script}</script>
<style>{inner_style}</style>
</html>
"""
<|reserved_special_token_1|>
"""
A web-page.
"""
import re
import pkg_resources
from .components import Link, Javascript, inject
class Page:
"""A web-page presenting container.
Args:
        favicon (str): The file name of the favorite icon displayed in a
            browser tab (default=None).
        title (str): The page title, displayed in a browser tab (default=None).
        resources (list): The list of `Link` and `Javascript` components
            representing the page resources (default=None).
menu (Menu): The page top level menu (default=None).
container (WebComponent): The page container (default=None).
"""
def __init__(
self,
favicon=None,
resources=None,
title=None,
menu=None,
container=None
):
super().__init__()
self.__favicon = favicon
self.__resources = resources
self.__title = title
self.__menu = menu
self.__container = container
self.__vars = {}
def __html__(self):
"""Renders an HTML page."""
return str(self)
def background(self, color=None, image=None, position=None, size=None,
repeat=None, origin=None, clip=None, attachment=None):
"""Configures the page background.
        See <a href="https://www.w3schools.com/cssref/css3_pr_background.asp">
        for more information.
        Args:
            color (str): The background color to use.
            image (str): The background images to use.
            position (str): The position of the background images.
            size (str): The size of the background images.
            repeat (str): How to repeat the background images.
            origin (str): The positioning area of the background images.
            clip (str): The painting area of the background images.
            attachment (str): Whether the background images are fixed or
                scroll with the rest of the page.
Returns:
obj (self): The instance of this class.
"""
if color:
self.__vars['--body-background-color'] = color
if image:
self.__vars['--body-background-image'] = image
if position:
self.__vars['--body-background-position'] = position
if size:
self.__vars['background-size'] = size
if repeat:
self.__vars['--body-background-repeat'] = repeat
if origin:
self.__vars['--body-background-origin'] = origin
if clip:
self.__vars['--body-background-clip'] = clip
if attachment:
self.__vars['--body-background-attachment'] = attachment
return self
def __str__(self):
"""Renders an HTML page."""
        # Collects the CSS supporting Bootstrap styles.
links = [
Link('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'), # NOQA
Link('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'), # NOQA
Link('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css') # NOQA
]
        # Collects the favicon shown in the browser tab.
if self.__favicon:
links.append(Link(self.__favicon, 'icon', 'image/x-icon'))
        # Collects the JS scripts supporting jQuery and code highlighting.
scripts = [
Javascript('https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'), # NOQA
Javascript('https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'), # NOQA
Javascript('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'), # NOQA
Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'), # NOQA
Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js') # NOQA
]
        # Adds user-defined resources, which can be CSS or JS files.
if self.__resources:
for resource in self.__resources:
if isinstance(resource, Link):
links.append(resource)
elif isinstance(resource, Javascript):
scripts.append(resource)
else:
raise TypeError(
'Page resource must be either <class "Link"> or '
f'<class "Javascript">, but got: {type(resource)};',
)
# Sets the page title.
title = None
if self.__title:
if isinstance(self.__title, str):
title = f'''<title>{self.__title}</title>'''
else:
raise TypeError(
f'Page title must be <str>, but got: {type(title)};',
)
# Creates inner style which will be embedded in the page.
root_vars = ''
if len(self.__vars) > 0:
for name, value in self.__vars.items():
root_vars += '%s: %s;' % (name, value)
root_vars += '--container-margin-top: %s' % (
'90px' if self.__menu else '10px'
)
root_vars = ':root{' + root_vars + '}'
inner_style = pkg_resources.resource_string(__name__, 'generic.css').\
decode('utf-8')
inner_style = root_vars + inner_style
inner_style = re.sub('\\n|\\s\\s+', ' ', inner_style)
# Creates inner script which will be embedded in the page.
inner_script = pkg_resources.resource_string(__name__, 'generic.js').\
decode('utf-8')
inner_script = re.sub('\\n|\\s\\s+', ' ', inner_script)
return f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<meta name="viewport"
content="width=device-width, initial-scale=1,
shrink-to-fit=no"/>
{inject(*links)}
{inject(*scripts)}
{inject(title)}
</head>
<body>
{inject(self.__menu)}
<div class="container-fluid">
{inject(self.__container)}
</div>
</body>
<script>{inner_script}</script>
<style>{inner_style}</style>
</html>
'''
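

# --- Illustrative usage (not part of the original module) --------------------
# A minimal sketch of how a Page is typically assembled and rendered. It
# assumes this module lives in a package that also exposes the Link/Javascript
# (and Menu/WebComponent) classes imported above; the package name, file names
# and component values below are placeholders, not part of this project.
#
#   from mypackage import Page, Link          # hypothetical import path
#
#   page = Page(
#       title='Demo',
#       favicon='favicon.ico',
#       resources=[Link('static/site.css')],
#       container=None,                       # normally a WebComponent
#   ).background(color='#f8f9fa')
#
#   html = str(page)                          # or page.__html__() in a template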
|
flexible
|
{
"blob_id": "2c2ad4b6e8c5055afa3dfb3b540a44bda65fa004",
"index": 5991,
"step-1": "<mask token>\n\n\nclass Page:\n <mask token>\n <mask token>\n <mask token>\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n )\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 'generic.css'\n ).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n 
<head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-2": "<mask token>\n\n\nclass Page:\n <mask token>\n\n def __init__(self, favicon=None, resources=None, title=None, menu=None,\n container=None):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n )\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 'generic.css'\n 
).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-3": "<mask token>\n\n\nclass Page:\n \"\"\"A web-page presenting container.\n\n Args:\n favicon (str): The file name for the favorite icon displayed in a\n browser tab(default=None)\n title (str): The page title, displayed in a browser tab (default=None).\n resources (list): The list of `Link` and `Javascript` components which\n representing the page resources (default=None).\n menu (Menu): The page top level menu (default=None).\n container (WebComponent): The page container (default=None).\n \"\"\"\n\n def __init__(self, favicon=None, resources=None, title=None, menu=None,\n container=None):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n )\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = 
f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 'generic.css'\n ).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-4": "<mask token>\nimport re\nimport pkg_resources\nfrom .components import Link, Javascript, inject\n\n\nclass Page:\n \"\"\"A web-page presenting container.\n\n Args:\n favicon (str): The file name for the favorite icon displayed in a\n browser tab(default=None)\n title (str): The page title, displayed in a browser tab (default=None).\n resources (list): The list of `Link` and `Javascript` components which\n representing the page resources (default=None).\n menu (Menu): The page top level menu (default=None).\n container (WebComponent): The page container (default=None).\n \"\"\"\n\n def __init__(self, favicon=None, resources=None, title=None, menu=None,\n container=None):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n links = [Link(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'\n ), Link(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css'\n )]\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n scripts = [Javascript(\n 'https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'\n ), Javascript(\n 'https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'\n ), Javascript(\n 'https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js'\n )]\n if self.__resources:\n for resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n f'Page resource must be either <class \"Link\"> or <class \"Javascript\">, but got: {type(resource)};'\n 
)\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'<title>{self.__title}</title>'\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};')\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % ('90px' if self.__menu else\n '10px')\n root_vars = ':root{' + root_vars + '}'\n inner_style = pkg_resources.resource_string(__name__, 'generic.css'\n ).decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n inner_script = pkg_resources.resource_string(__name__, 'generic.js'\n ).decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n return f\"\"\"\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n \"\"\"\n",
"step-5": "\"\"\"\nA web-page.\n\"\"\"\n\nimport re\nimport pkg_resources\n\nfrom .components import Link, Javascript, inject\n\n\nclass Page:\n \"\"\"A web-page presenting container.\n\n Args:\n favicon (str): The file name for the favorite icon displayed in a\n browser tab(default=None)\n title (str): The page title, displayed in a browser tab (default=None).\n resources (list): The list of `Link` and `Javascript` components which\n representing the page resources (default=None).\n menu (Menu): The page top level menu (default=None).\n container (WebComponent): The page container (default=None).\n \"\"\"\n\n def __init__(\n self,\n favicon=None,\n resources=None,\n title=None,\n menu=None,\n container=None\n ):\n super().__init__()\n self.__favicon = favicon\n self.__resources = resources\n self.__title = title\n self.__menu = menu\n self.__container = container\n self.__vars = {}\n\n def __html__(self):\n \"\"\"Renders an HTML page.\"\"\"\n return str(self)\n\n def background(self, color=None, image=None, position=None, size=None,\n repeat=None, origin=None, clip=None, attachment=None):\n \"\"\"Configures the page background.\n\n See <a href=\"https://www.w3schools.com/cssref/css3_pr_background.asp\">\n form more information.\n\n Args:\n color (str): The background color to used.\n image(str): The background images to used.\n position(str): The position of the background images.\n size(str): The size of the background images.\n repeat(str): The parameter to define of how to repeat the\n background images.\n origin(str): The positioning area of the background images.\n clip(str): The painting area of the background images.\n attachment (str): The parameter to define whether the background\n images are fixed or scrolls with the rest of the page.\n\n Returns:\n obj (self): The instance of this class.\n \"\"\"\n if color:\n self.__vars['--body-background-color'] = color\n if image:\n self.__vars['--body-background-image'] = image\n if position:\n self.__vars['--body-background-position'] = position\n if size:\n self.__vars['background-size'] = size\n if repeat:\n self.__vars['--body-background-repeat'] = repeat\n if origin:\n self.__vars['--body-background-origin'] = origin\n if clip:\n self.__vars['--body-background-clip'] = clip\n if attachment:\n self.__vars['--body-background-attachment'] = attachment\n return self\n\n def __str__(self):\n \"\"\"Renders an HTML page.\"\"\"\n # Collects CSS supporting Bootstrap stypes.\n links = [\n Link('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css'), # NOQA\n Link('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.8.2/css/all.min.css'), # NOQA\n Link('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/styles/default.min.css') # NOQA\n ]\n\n # Collects FABICON showing in tab.\n if self.__favicon:\n links.append(Link(self.__favicon, 'icon', 'image/x-icon'))\n\n # Collects JS scriptis supporting JQuery and code highlights.\n scripts = [\n Javascript('https://ajax.googleapis.com/ajax/libs/jquery/3.1.0/jquery.min.js'), # NOQA\n Javascript('https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.16.0/umd/popper.min.js'), # NOQA\n Javascript('https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js'), # NOQA\n Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/highlight.min.js'), # NOQA\n Javascript('https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.5.0/languages/python.min.js') # NOQA\n ]\n\n # Adds customer defined resources which could be CSS or JS files.\n if self.__resources:\n for 
resource in self.__resources:\n if isinstance(resource, Link):\n links.append(resource)\n elif isinstance(resource, Javascript):\n scripts.append(resource)\n else:\n raise TypeError(\n 'Page resource must be either <class \"Link\"> or '\n f'<class \"Javascript\">, but got: {type(resource)};',\n )\n\n # Sets the page title.\n title = None\n if self.__title:\n if isinstance(self.__title, str):\n title = f'''<title>{self.__title}</title>'''\n else:\n raise TypeError(\n f'Page title must be <str>, but got: {type(title)};',\n )\n\n # Creates inner style which will be embedded in the page.\n root_vars = ''\n if len(self.__vars) > 0:\n for name, value in self.__vars.items():\n root_vars += '%s: %s;' % (name, value)\n root_vars += '--container-margin-top: %s' % (\n '90px' if self.__menu else '10px'\n )\n root_vars = ':root{' + root_vars + '}'\n\n inner_style = pkg_resources.resource_string(__name__, 'generic.css').\\\n decode('utf-8')\n inner_style = root_vars + inner_style\n inner_style = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_style)\n\n # Creates inner script which will be embedded in the page.\n inner_script = pkg_resources.resource_string(__name__, 'generic.js').\\\n decode('utf-8')\n inner_script = re.sub('\\\\n|\\\\s\\\\s+', ' ', inner_script)\n\n return f'''\n <!DOCTYPE html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"/>\n <meta name=\"viewport\"\n content=\"width=device-width, initial-scale=1,\n shrink-to-fit=no\"/>\n {inject(*links)}\n {inject(*scripts)}\n {inject(title)}\n </head>\n <body>\n {inject(self.__menu)}\n <div class=\"container-fluid\">\n {inject(self.__container)}\n </div>\n </body>\n <script>{inner_script}</script>\n <style>{inner_style}</style>\n </html>\n '''\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import pandas as pd
import numpy as np
import random
import csv
import pprint
import datamake
import dafunc_H
def simulation(cnt, a, b):
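    # Descriptive note (added): repeat the DA matching cnt times — each run
    # reseeds the RNG, rebuilds the student and university data, applies
    # dafunc_H.da_H 200 times, and stacks the first five columns of the
    # student array (transposed). The stacked runs are written to a
    # '<cnt>-<a>-<b>DA-Q.txt' file under Result/ and returned.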
df, df_collist = datamake.make_df(
'/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'
)
n, m, k = datamake.stu_num()
df_stu = np.zeros((1, n + 1))
for j in range(cnt):
random.seed(48 + j)
student = datamake.make_stu(n, m, k, a, b)
#print(df_collist)
univ = datamake.univ_make(df, df_collist)
for i in range(200):
dafunc_H.da_H(student, univ, df_collist)
if j == 0:
df_stu = student[:, 0:5].T.copy()
else:
df_stuadd = student[:, 0:5].T.copy()
df_stu = np.vstack((df_stu, df_stuadd))
url = '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/' + str(
cnt) + "-" + str(a) + "-" + str(b) + 'DA-Q.txt'
np.savetxt(url, df_stu, delimiter=',', fmt='%d')
return df_stu
#def stu_summary(df_stu):
res0 = simulation(4, 0.7, 0.8)
#def do_simulation():
print(res0)
|
normal
|
{
"blob_id": "cad00f80afa142b69ced880de000b6b5b230640c",
"index": 6228,
"step-1": "<mask token>\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\n<mask token>\nprint(res0)\n",
"step-3": "<mask token>\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\nres0 = simulation(4, 0.7, 0.8)\nprint(res0)\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport random\nimport csv\nimport pprint\nimport datamake\nimport dafunc_H\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n univ = datamake.univ_make(df, df_collist)\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n url = (\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/'\n + str(cnt) + '-' + str(a) + '-' + str(b) + 'DA-Q.txt')\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n return df_stu\n\n\nres0 = simulation(4, 0.7, 0.8)\nprint(res0)\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport random\nimport csv\nimport pprint\nimport datamake\nimport dafunc_H\n\n\ndef simulation(cnt, a, b):\n df, df_collist = datamake.make_df(\n '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/csvdata/sinhuri2018.csv'\n )\n n, m, k = datamake.stu_num()\n df_stu = np.zeros((1, n + 1))\n\n for j in range(cnt):\n random.seed(48 + j)\n student = datamake.make_stu(n, m, k, a, b)\n #print(df_collist)\n univ = datamake.univ_make(df, df_collist)\n\n for i in range(200):\n dafunc_H.da_H(student, univ, df_collist)\n\n if j == 0:\n df_stu = student[:, 0:5].T.copy()\n\n else:\n df_stuadd = student[:, 0:5].T.copy()\n df_stu = np.vstack((df_stu, df_stuadd))\n\n url = '/Users/masato/Desktop/UTTdata/prog/PyProgramming/DA_algorithm/Mavo/Result/' + str(\n cnt) + \"-\" + str(a) + \"-\" + str(b) + 'DA-Q.txt'\n\n np.savetxt(url, df_stu, delimiter=',', fmt='%d')\n\n return df_stu\n\n\n#def stu_summary(df_stu):\n\nres0 = simulation(4, 0.7, 0.8)\n\n#def do_simulation():\n\nprint(res0)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def collect_leaves(u: Union[dict, list]) ->list:
flatten_list = []
if isinstance(u, dict):
for item in u.values():
flatten_list.extend(collect_leaves(item))
return flatten_list
return u
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def collect_leaves(u: Union[dict, list]) ->list:
flatten_list = []
if isinstance(u, dict):
for item in u.values():
flatten_list.extend(collect_leaves(item))
return flatten_list
return u
<|reserved_special_token_0|>
assert collect_leaves([1, 2, 3]) == [1, 2, 3]
assert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def collect_leaves(u: Union[dict, list]) ->list:
flatten_list = []
if isinstance(u, dict):
for item in u.values():
flatten_list.extend(collect_leaves(item))
return flatten_list
return u
tree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},
'node12': [6]}, 'node2': [7, 8, 9]}
assert collect_leaves([1, 2, 3]) == [1, 2, 3]
assert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import Union
def collect_leaves(u: Union[dict, list]) ->list:
flatten_list = []
if isinstance(u, dict):
for item in u.values():
flatten_list.extend(collect_leaves(item))
return flatten_list
return u
tree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},
'node12': [6]}, 'node2': [7, 8, 9]}
assert collect_leaves([1, 2, 3]) == [1, 2, 3]
assert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
<|reserved_special_token_1|>
""""
You are given a tree-like data structure represented as nested dictionaries.
Implement a function collect_leaves that accepts a tree and returns a list of all its leaves. A leaf is a bottom-most node in a tree.
Implement a kind of unit tests via assert operator.
"""
from typing import Union
def collect_leaves(u: Union[dict, list]) -> list:
flatten_list = []
if isinstance(u, dict):
for item in u.values():
flatten_list.extend(collect_leaves(item))
return flatten_list
return u
tree = {
"node1": {
"node11": {
"node111": [1, 2, 3],
"node112": [4, 5]
},
"node12": [6]
},
"node2": [7, 8, 9]
}
assert collect_leaves([1, 2, 3]) == [1, 2, 3]
assert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]
|
flexible
|
{
"blob_id": "603cce951dd0f78ef3ca9dce587042b3b7f6b449",
"index": 8001,
"step-1": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\n<mask token>\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
"step-3": "<mask token>\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\ntree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},\n 'node12': [6]}, 'node2': [7, 8, 9]}\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
"step-4": "<mask token>\nfrom typing import Union\n\n\ndef collect_leaves(u: Union[dict, list]) ->list:\n flatten_list = []\n if isinstance(u, dict):\n for item in u.values():\n flatten_list.extend(collect_leaves(item))\n return flatten_list\n return u\n\n\ntree = {'node1': {'node11': {'node111': [1, 2, 3], 'node112': [4, 5]},\n 'node12': [6]}, 'node2': [7, 8, 9]}\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
"step-5": "\"\"\"\"\r\nYou are given a tree-like data structure represented as nested dictionaries.\r\nImplement a function collect_leaves that accepts a tree and returns a list of all its leaves. A leaf is a bottom-most node in a tree.\r\n\r\nImplement a kind of unit tests via assert operator.\r\n\"\"\"\r\nfrom typing import Union\r\n\r\n\r\ndef collect_leaves(u: Union[dict, list]) -> list:\r\n flatten_list = []\r\n if isinstance(u, dict):\r\n for item in u.values():\r\n flatten_list.extend(collect_leaves(item))\r\n return flatten_list\r\n return u\r\n\r\n\r\ntree = {\r\n \"node1\": {\r\n \"node11\": {\r\n \"node111\": [1, 2, 3],\r\n \"node112\": [4, 5]\r\n },\r\n \"node12\": [6]\r\n },\r\n \"node2\": [7, 8, 9]\r\n}\r\n\r\nassert collect_leaves([1, 2, 3]) == [1, 2, 3]\r\nassert collect_leaves(tree) == [1, 2, 3, 4, 5, 6, 7, 8, 9]\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import hashlib
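# a: PBKDF2-HMAC-SHA256 of the password b"hallo" with salt b"salt", 1 iteration
# b: PBKDF2 run again on the 1-iteration result a (chained derivation)
# c: PBKDF2 of the original password with 2 iterations
# Printing b and c shows whether chaining two 1-iteration runs matches a single 2-iteration run.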
a = hashlib.pbkdf2_hmac("sha256", b"hallo", b"salt", 1)
b = hashlib.pbkdf2_hmac("sha256", a, b"salt", 1)
c = hashlib.pbkdf2_hmac("sha256", b"hallo", b"salt", 2)
print(b)
print(c)
|
normal
|
{
"blob_id": "20ac73789fa7297a9230a6a2b814349d2b7da5fb",
"index": 1851,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(b)\nprint(c)\n",
"step-3": "<mask token>\na = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)\nb = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)\nc = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)\nprint(b)\nprint(c)\n",
"step-4": "import hashlib\na = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 1)\nb = hashlib.pbkdf2_hmac('sha256', a, b'salt', 1)\nc = hashlib.pbkdf2_hmac('sha256', b'hallo', b'salt', 2)\nprint(b)\nprint(c)\n",
"step-5": "import hashlib\r\na = hashlib.pbkdf2_hmac(\"sha256\", b\"hallo\", b\"salt\", 1)\r\nb = hashlib.pbkdf2_hmac(\"sha256\", a, b\"salt\", 1)\r\nc = hashlib.pbkdf2_hmac(\"sha256\", b\"hallo\", b\"salt\", 2)\r\nprint(b)\r\nprint(c)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(3):
ans += min(yuki[i], enemy[(i + 1) % 3]) * 3
yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(
0, enemy[(i + 1) % 3] - yuki[i])
for i in range(3):
ans += min(yuki[i], enemy[i])
print(ans)
<|reserved_special_token_1|>
yuki = list(map(int, input().split()))
S = input()
enemy = [S.count('G'), S.count('C'), S.count('P')]
ans = 0
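# Greedy scoring: pair hand i with the opponent hand at index (i + 1) % 3 in the
# [G, C, P] ordering for 3 points each (wins), then pair equal hands for 1 point each (draws).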
for i in range(3):
ans += min(yuki[i], enemy[(i + 1) % 3]) * 3
yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(
0, enemy[(i + 1) % 3] - yuki[i])
for i in range(3):
ans += min(yuki[i], enemy[i])
print(ans)
|
flexible
|
{
"blob_id": "ce98c13555c474de0a9cb12e99a97b2316312b00",
"index": 979,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(3):\n ans += min(yuki[i], enemy[(i + 1) % 3]) * 3\n yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(\n 0, enemy[(i + 1) % 3] - yuki[i])\nfor i in range(3):\n ans += min(yuki[i], enemy[i])\nprint(ans)\n",
"step-3": "yuki = list(map(int, input().split()))\nS = input()\nenemy = [S.count('G'), S.count('C'), S.count('P')]\nans = 0\nfor i in range(3):\n ans += min(yuki[i], enemy[(i + 1) % 3]) * 3\n yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(\n 0, enemy[(i + 1) % 3] - yuki[i])\nfor i in range(3):\n ans += min(yuki[i], enemy[i])\nprint(ans)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class BreadCrumbHomeBasePage:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BreadCrumbHomeBasePage:
<|reserved_special_token_0|>
def gotoTicketInfoBasePage(self, ticketInfoPage):
self.driver.get(ticketInfoPage)
breadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()
breadCrumbTicketInfoBasePage.driver = self.driver
return breadCrumbTicketInfoBasePage
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BreadCrumbHomeBasePage:
def __init__(self):
""""""
def gotoTicketInfoBasePage(self, ticketInfoPage):
self.driver.get(ticketInfoPage)
breadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()
breadCrumbTicketInfoBasePage.driver = self.driver
return breadCrumbTicketInfoBasePage
<|reserved_special_token_1|>
from src.basepages.BreadCrumbTicketInfoBasePage import *
class BreadCrumbHomeBasePage:
def __init__(self):
""""""
def gotoTicketInfoBasePage(self, ticketInfoPage):
self.driver.get(ticketInfoPage)
breadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()
breadCrumbTicketInfoBasePage.driver = self.driver
return breadCrumbTicketInfoBasePage
<|reserved_special_token_1|>
from src.basepages.BreadCrumbTicketInfoBasePage import *
class BreadCrumbHomeBasePage:
def __init__(self):
""
def gotoTicketInfoBasePage(self,ticketInfoPage):
self.driver.get(ticketInfoPage)
breadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()
breadCrumbTicketInfoBasePage.driver = self.driver
return breadCrumbTicketInfoBasePage
|
flexible
|
{
"blob_id": "47c1746c2edfe4018decd59efbacc8be89a1f49e",
"index": 3653,
"step-1": "<mask token>\n\n\nclass BreadCrumbHomeBasePage:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BreadCrumbHomeBasePage:\n <mask token>\n\n def gotoTicketInfoBasePage(self, ticketInfoPage):\n self.driver.get(ticketInfoPage)\n breadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()\n breadCrumbTicketInfoBasePage.driver = self.driver\n return breadCrumbTicketInfoBasePage\n",
"step-3": "<mask token>\n\n\nclass BreadCrumbHomeBasePage:\n\n def __init__(self):\n \"\"\"\"\"\"\n\n def gotoTicketInfoBasePage(self, ticketInfoPage):\n self.driver.get(ticketInfoPage)\n breadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()\n breadCrumbTicketInfoBasePage.driver = self.driver\n return breadCrumbTicketInfoBasePage\n",
"step-4": "from src.basepages.BreadCrumbTicketInfoBasePage import *\n\n\nclass BreadCrumbHomeBasePage:\n\n def __init__(self):\n \"\"\"\"\"\"\n\n def gotoTicketInfoBasePage(self, ticketInfoPage):\n self.driver.get(ticketInfoPage)\n breadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()\n breadCrumbTicketInfoBasePage.driver = self.driver\n return breadCrumbTicketInfoBasePage\n",
"step-5": "\nfrom src.basepages.BreadCrumbTicketInfoBasePage import *\n\nclass BreadCrumbHomeBasePage:\n\tdef __init__(self):\n\t\t\"\"\n\n\tdef gotoTicketInfoBasePage(self,ticketInfoPage):\n\t\tself.driver.get(ticketInfoPage)\n\n\t\tbreadCrumbTicketInfoBasePage = BreadCrumbTicketInfoBasePage()\n\t\tbreadCrumbTicketInfoBasePage.driver = self.driver\n\t\treturn breadCrumbTicketInfoBasePage\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import pygame
import serial
import time
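# Probe the board opened on ACM0 with '?' and use the fifth byte (index 4) of its
# reply to decide which of ACM0/ACM1 is the right-hand controller and which is the left.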
ser1 = serial.Serial('/dev/ttyACM0', 115200) #Right
ser1.write('?\n')
time.sleep(0.5)
if ser1.readline()[4] == 0:
ser2 = serial.Serial('/dev/ttyACM1', 115200) #Left, negative speeds go forward
else:
ser1 = serial.Serial('/dev/ttyACM1', 115200)
ser2 = serial.Serial('/dev/ttyACM0', 115200)
def write_spd(write1, write2):
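    # 'sd<value>' is the speed command; the left controller (ser2) gets a negated
    # value because negative speeds drive it forward (see the note above).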
ser1.write('sd'+str(write1)+'\n')
ser2.write('sd'+str(-write2)+'\n')
speed = 60
up = 0
down = 0
left = 0
right = 0
state = {'up':0, 'down':0, 'left':0, 'right':0}
scr = pygame.display.set_mode((1,1))
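# Event loop using raw pygame 1.x constants: event type 2/3 = KEYDOWN/KEYUP,
# key 27 = Escape (stop motors and quit), 273-276 = Up/Down/Right/Left. Arrow-key
# state is mapped to wheel speeds: both forward, one wheel stopped for turns,
# opposite signs to spin in place, or both reversed.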
while(True):
elist = pygame.event.get()
for event in elist:
if event.type == 2 and event.dict.get('key') == 27:
write_spd(0, 0)
quit()
if event.type == 2:
if event.dict.get('key') == 273:
state['up'] = 1
elif event.dict.get('key') == 274:
state['down'] = 1
elif event.dict.get('key') == 275:
state['right'] = 1
elif event.dict.get('key') == 276:
state['left'] = 1
if event.type == 3:
if event.dict.get('key') == 273:
state['up'] = 0
elif event.dict.get('key') == 274:
state['down'] = 0
elif event.dict.get('key') == 275:
state['right'] = 0
elif event.dict.get('key') == 276:
state['left'] = 0
if state['up'] == 1:
if state['right'] == 1:
write_spd(0, speed)
elif state['left'] == 1:
write_spd(speed, 0)
else:
write_spd(speed, speed)
elif state['left'] == 1:
write_spd(speed, -speed)
elif state['right'] == 1:
write_spd(-speed, speed)
elif state['down'] == 1:
write_spd(-speed, -speed)
else:
write_spd(0, 0)
|
normal
|
{
"blob_id": "e6d4d12d47391927364fdc9765c68690d42c5d8d",
"index": 8950,
"step-1": "<mask token>\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200)\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\n<mask token>\nwhile True:\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n",
"step-3": "<mask token>\nser1 = serial.Serial('/dev/ttyACM0', 115200)\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200)\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\nspeed = 60\nup = 0\ndown = 0\nleft = 0\nright = 0\nstate = {'up': 0, 'down': 0, 'left': 0, 'right': 0}\nscr = pygame.display.set_mode((1, 1))\nwhile True:\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n",
"step-4": "import pygame\nimport serial\nimport time\nser1 = serial.Serial('/dev/ttyACM0', 115200)\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200)\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\n\ndef write_spd(write1, write2):\n ser1.write('sd' + str(write1) + '\\n')\n ser2.write('sd' + str(-write2) + '\\n')\n\n\nspeed = 60\nup = 0\ndown = 0\nleft = 0\nright = 0\nstate = {'up': 0, 'down': 0, 'left': 0, 'right': 0}\nscr = pygame.display.set_mode((1, 1))\nwhile True:\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n",
"step-5": "import pygame\nimport serial\nimport time\n\nser1 = serial.Serial('/dev/ttyACM0', 115200) #Right\nser1.write('?\\n')\ntime.sleep(0.5)\nif ser1.readline()[4] == 0:\n ser2 = serial.Serial('/dev/ttyACM1', 115200) #Left, negative speeds go forward\nelse:\n ser1 = serial.Serial('/dev/ttyACM1', 115200)\n ser2 = serial.Serial('/dev/ttyACM0', 115200)\n\ndef write_spd(write1, write2):\n ser1.write('sd'+str(write1)+'\\n')\n ser2.write('sd'+str(-write2)+'\\n')\n\nspeed = 60\n\nup = 0\ndown = 0\nleft = 0\nright = 0\nstate = {'up':0, 'down':0, 'left':0, 'right':0}\n\nscr = pygame.display.set_mode((1,1))\nwhile(True):\n elist = pygame.event.get()\n for event in elist:\n if event.type == 2 and event.dict.get('key') == 27:\n write_spd(0, 0)\n quit()\n if event.type == 2:\n if event.dict.get('key') == 273:\n state['up'] = 1\n elif event.dict.get('key') == 274:\n state['down'] = 1\n elif event.dict.get('key') == 275:\n state['right'] = 1\n elif event.dict.get('key') == 276:\n state['left'] = 1\n if event.type == 3:\n if event.dict.get('key') == 273:\n state['up'] = 0\n elif event.dict.get('key') == 274:\n state['down'] = 0\n elif event.dict.get('key') == 275:\n state['right'] = 0\n elif event.dict.get('key') == 276:\n state['left'] = 0\n if state['up'] == 1:\n if state['right'] == 1:\n write_spd(0, speed)\n elif state['left'] == 1:\n write_spd(speed, 0)\n else:\n write_spd(speed, speed)\n elif state['left'] == 1:\n write_spd(speed, -speed)\n elif state['right'] == 1:\n write_spd(-speed, speed)\n elif state['down'] == 1:\n write_spd(-speed, -speed)\n else:\n write_spd(0, 0)\n \n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MockProto(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_mock_spout(self, spout_name, output_streams, spout_parallelism):
spout = protoTopology.Spout()
spout.comp.name = spout_name
kv = spout.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(spout_parallelism)
for stream in output_streams:
spout.outputs.add().stream.CopyFrom(stream)
return spout
def create_mock_bolt(self, bolt_name, input_streams, output_streams,
bolt_parallelism):
bolt = protoTopology.Bolt()
bolt.comp.name = bolt_name
kv = bolt.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(bolt_parallelism)
for stream in input_streams:
bolt.inputs.add().stream.CopyFrom(stream)
for stream in output_streams:
bolt.outputs.add().stream.CopyFrom(stream)
return bolt
def create_mock_simple_topology(self, spout_parallelism=1,
bolt_parallelism=1):
"""
Simple topology contains one spout and one bolt.
"""
topology = protoTopology.Topology()
topology.id = MockProto.topology_id
topology.name = MockProto.topology_name
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout'
spout = self.create_mock_spout('mock_spout', [stream1],
spout_parallelism)
topology.spouts.extend([spout])
bolt = self.create_mock_bolt('mock_bolt', [stream1], [],
bolt_parallelism)
topology.bolts.extend([bolt])
return topology
def create_mock_medium_topology(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
"""
Medium topology is a three stage topology
with one spout, two mid stage bolts, and one
last stage bolt.
S -str1-> B1 -str3-> B3
S -str2-> B2 -str4-> B3
"""
topology = protoTopology.Topology()
topology.id = 'mock_topology_id'
topology.name = 'mock_topology_name'
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout1'
stream2 = protoTopology.StreamId()
stream2.id = 'mock_stream2'
stream2.component_name = 'mock_spout1'
stream3 = protoTopology.StreamId()
stream3.id = 'mock_stream3'
stream3.component_name = 'mock_bolt1'
stream4 = protoTopology.StreamId()
stream4.id = 'mock_stream4'
stream4.component_name = 'mock_bolt2'
spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],
spout_parallelism)
topology.spouts.extend([spout1])
bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],
bolt1_parallelism)
bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],
bolt2_parallelism)
bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],
bolt3_parallelism)
topology.bolts.extend([bolt1, bolt2, bolt3])
return topology
def create_mock_simple_physical_plan(self, spout_parallelism=1,
bolt_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_simple_topology(
spout_parallelism, bolt_parallelism))
return pplan
def create_mock_medium_physical_plan(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_medium_topology(
spout_parallelism, bolt1_parallelism, bolt2_parallelism,
bolt3_parallelism))
return pplan
def create_mock_execution_state(self):
estate = protoEState.ExecutionState()
estate.topology_name = MockProto.topology_name
estate.topology_id = MockProto.topology_id
estate.cluster = MockProto.cluster
estate.environ = MockProto.environ
return estate
def create_mock_tmaster(self):
tmaster = protoTmaster.TMasterLocation()
return tmaster
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MockProto(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def create_mock_spout(self, spout_name, output_streams, spout_parallelism):
spout = protoTopology.Spout()
spout.comp.name = spout_name
kv = spout.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(spout_parallelism)
for stream in output_streams:
spout.outputs.add().stream.CopyFrom(stream)
return spout
def create_mock_bolt(self, bolt_name, input_streams, output_streams,
bolt_parallelism):
bolt = protoTopology.Bolt()
bolt.comp.name = bolt_name
kv = bolt.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(bolt_parallelism)
for stream in input_streams:
bolt.inputs.add().stream.CopyFrom(stream)
for stream in output_streams:
bolt.outputs.add().stream.CopyFrom(stream)
return bolt
def create_mock_simple_topology(self, spout_parallelism=1,
bolt_parallelism=1):
"""
Simple topology contains one spout and one bolt.
"""
topology = protoTopology.Topology()
topology.id = MockProto.topology_id
topology.name = MockProto.topology_name
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout'
spout = self.create_mock_spout('mock_spout', [stream1],
spout_parallelism)
topology.spouts.extend([spout])
bolt = self.create_mock_bolt('mock_bolt', [stream1], [],
bolt_parallelism)
topology.bolts.extend([bolt])
return topology
def create_mock_medium_topology(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
"""
Medium topology is a three stage topology
with one spout, two mid stage bolts, and one
last stage bolt.
S -str1-> B1 -str3-> B3
S -str2-> B2 -str4-> B3
"""
topology = protoTopology.Topology()
topology.id = 'mock_topology_id'
topology.name = 'mock_topology_name'
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout1'
stream2 = protoTopology.StreamId()
stream2.id = 'mock_stream2'
stream2.component_name = 'mock_spout1'
stream3 = protoTopology.StreamId()
stream3.id = 'mock_stream3'
stream3.component_name = 'mock_bolt1'
stream4 = protoTopology.StreamId()
stream4.id = 'mock_stream4'
stream4.component_name = 'mock_bolt2'
spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],
spout_parallelism)
topology.spouts.extend([spout1])
bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],
bolt1_parallelism)
bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],
bolt2_parallelism)
bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],
bolt3_parallelism)
topology.bolts.extend([bolt1, bolt2, bolt3])
return topology
def create_mock_simple_physical_plan(self, spout_parallelism=1,
bolt_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_simple_topology(
spout_parallelism, bolt_parallelism))
return pplan
def create_mock_medium_physical_plan(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_medium_topology(
spout_parallelism, bolt1_parallelism, bolt2_parallelism,
bolt3_parallelism))
return pplan
def create_mock_execution_state(self):
estate = protoEState.ExecutionState()
estate.topology_name = MockProto.topology_name
estate.topology_id = MockProto.topology_id
estate.cluster = MockProto.cluster
estate.environ = MockProto.environ
return estate
def create_mock_tmaster(self):
tmaster = protoTmaster.TMasterLocation()
return tmaster
def add_topology_config(self, topology, key, value):
kv = topology.topology_config.kvs.add()
kv.key = key
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(value)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MockProto(object):
""" Mocking Proto"""
topology_name = 'mock_topology_name'
topology_id = 'mock_topology_id'
cluster = 'mock_topology_cluster'
environ = 'mock_topology_environ'
def create_mock_spout(self, spout_name, output_streams, spout_parallelism):
spout = protoTopology.Spout()
spout.comp.name = spout_name
kv = spout.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(spout_parallelism)
for stream in output_streams:
spout.outputs.add().stream.CopyFrom(stream)
return spout
def create_mock_bolt(self, bolt_name, input_streams, output_streams,
bolt_parallelism):
bolt = protoTopology.Bolt()
bolt.comp.name = bolt_name
kv = bolt.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(bolt_parallelism)
for stream in input_streams:
bolt.inputs.add().stream.CopyFrom(stream)
for stream in output_streams:
bolt.outputs.add().stream.CopyFrom(stream)
return bolt
def create_mock_simple_topology(self, spout_parallelism=1,
bolt_parallelism=1):
"""
Simple topology contains one spout and one bolt.
"""
topology = protoTopology.Topology()
topology.id = MockProto.topology_id
topology.name = MockProto.topology_name
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout'
spout = self.create_mock_spout('mock_spout', [stream1],
spout_parallelism)
topology.spouts.extend([spout])
bolt = self.create_mock_bolt('mock_bolt', [stream1], [],
bolt_parallelism)
topology.bolts.extend([bolt])
return topology
def create_mock_medium_topology(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
"""
Medium topology is a three stage topology
with one spout, two mid stage bolts, and one
last stage bolt.
S -str1-> B1 -str3-> B3
S -str2-> B2 -str4-> B3
"""
topology = protoTopology.Topology()
topology.id = 'mock_topology_id'
topology.name = 'mock_topology_name'
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout1'
stream2 = protoTopology.StreamId()
stream2.id = 'mock_stream2'
stream2.component_name = 'mock_spout1'
stream3 = protoTopology.StreamId()
stream3.id = 'mock_stream3'
stream3.component_name = 'mock_bolt1'
stream4 = protoTopology.StreamId()
stream4.id = 'mock_stream4'
stream4.component_name = 'mock_bolt2'
spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],
spout_parallelism)
topology.spouts.extend([spout1])
bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],
bolt1_parallelism)
bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],
bolt2_parallelism)
bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],
bolt3_parallelism)
topology.bolts.extend([bolt1, bolt2, bolt3])
return topology
def create_mock_simple_physical_plan(self, spout_parallelism=1,
bolt_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_simple_topology(
spout_parallelism, bolt_parallelism))
return pplan
def create_mock_medium_physical_plan(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_medium_topology(
spout_parallelism, bolt1_parallelism, bolt2_parallelism,
bolt3_parallelism))
return pplan
def create_mock_execution_state(self):
estate = protoEState.ExecutionState()
estate.topology_name = MockProto.topology_name
estate.topology_id = MockProto.topology_id
estate.cluster = MockProto.cluster
estate.environ = MockProto.environ
return estate
def create_mock_tmaster(self):
tmaster = protoTmaster.TMasterLocation()
return tmaster
def add_topology_config(self, topology, key, value):
kv = topology.topology_config.kvs.add()
kv.key = key
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(value)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from heron.common.src.python import constants
import heron.proto.execution_state_pb2 as protoEState
import heron.proto.physical_plan_pb2 as protoPPlan
import heron.proto.tmaster_pb2 as protoTmaster
import heron.proto.topology_pb2 as protoTopology
class MockProto(object):
""" Mocking Proto"""
topology_name = 'mock_topology_name'
topology_id = 'mock_topology_id'
cluster = 'mock_topology_cluster'
environ = 'mock_topology_environ'
def create_mock_spout(self, spout_name, output_streams, spout_parallelism):
spout = protoTopology.Spout()
spout.comp.name = spout_name
kv = spout.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(spout_parallelism)
for stream in output_streams:
spout.outputs.add().stream.CopyFrom(stream)
return spout
def create_mock_bolt(self, bolt_name, input_streams, output_streams,
bolt_parallelism):
bolt = protoTopology.Bolt()
bolt.comp.name = bolt_name
kv = bolt.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(bolt_parallelism)
for stream in input_streams:
bolt.inputs.add().stream.CopyFrom(stream)
for stream in output_streams:
bolt.outputs.add().stream.CopyFrom(stream)
return bolt
def create_mock_simple_topology(self, spout_parallelism=1,
bolt_parallelism=1):
"""
Simple topology contains one spout and one bolt.
"""
topology = protoTopology.Topology()
topology.id = MockProto.topology_id
topology.name = MockProto.topology_name
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout'
spout = self.create_mock_spout('mock_spout', [stream1],
spout_parallelism)
topology.spouts.extend([spout])
bolt = self.create_mock_bolt('mock_bolt', [stream1], [],
bolt_parallelism)
topology.bolts.extend([bolt])
return topology
def create_mock_medium_topology(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
"""
Medium topology is a three stage topology
with one spout, two mid stage bolts, and one
last stage bolt.
S -str1-> B1 -str3-> B3
S -str2-> B2 -str4-> B3
"""
topology = protoTopology.Topology()
topology.id = 'mock_topology_id'
topology.name = 'mock_topology_name'
stream1 = protoTopology.StreamId()
stream1.id = 'mock_stream1'
stream1.component_name = 'mock_spout1'
stream2 = protoTopology.StreamId()
stream2.id = 'mock_stream2'
stream2.component_name = 'mock_spout1'
stream3 = protoTopology.StreamId()
stream3.id = 'mock_stream3'
stream3.component_name = 'mock_bolt1'
stream4 = protoTopology.StreamId()
stream4.id = 'mock_stream4'
stream4.component_name = 'mock_bolt2'
spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],
spout_parallelism)
topology.spouts.extend([spout1])
bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],
bolt1_parallelism)
bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],
bolt2_parallelism)
bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],
bolt3_parallelism)
topology.bolts.extend([bolt1, bolt2, bolt3])
return topology
def create_mock_simple_physical_plan(self, spout_parallelism=1,
bolt_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_simple_topology(
spout_parallelism, bolt_parallelism))
return pplan
def create_mock_medium_physical_plan(self, spout_parallelism=1,
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_medium_topology(
spout_parallelism, bolt1_parallelism, bolt2_parallelism,
bolt3_parallelism))
return pplan
def create_mock_execution_state(self):
estate = protoEState.ExecutionState()
estate.topology_name = MockProto.topology_name
estate.topology_id = MockProto.topology_id
estate.cluster = MockProto.cluster
estate.environ = MockProto.environ
return estate
def create_mock_tmaster(self):
tmaster = protoTmaster.TMasterLocation()
return tmaster
def add_topology_config(self, topology, key, value):
kv = topology.topology_config.kvs.add()
kv.key = key
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(value)
<|reserved_special_token_1|>
''' mock_proto.py '''
from heron.common.src.python import constants
import heron.proto.execution_state_pb2 as protoEState
import heron.proto.physical_plan_pb2 as protoPPlan
import heron.proto.tmaster_pb2 as protoTmaster
import heron.proto.topology_pb2 as protoTopology
# pylint: disable=no-self-use, missing-docstring
class MockProto(object):
''' Mocking Proto'''
topology_name = "mock_topology_name"
topology_id = "mock_topology_id"
cluster = "mock_topology_cluster"
environ = "mock_topology_environ"
def create_mock_spout(self,
spout_name,
output_streams,
spout_parallelism):
spout = protoTopology.Spout()
spout.comp.name = spout_name
kv = spout.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(spout_parallelism)
for stream in output_streams:
spout.outputs.add().stream.CopyFrom(stream)
return spout
def create_mock_bolt(self,
bolt_name,
input_streams,
output_streams,
bolt_parallelism):
bolt = protoTopology.Bolt()
bolt.comp.name = bolt_name
kv = bolt.comp.config.kvs.add()
kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(bolt_parallelism)
for stream in input_streams:
bolt.inputs.add().stream.CopyFrom(stream)
for stream in output_streams:
bolt.outputs.add().stream.CopyFrom(stream)
return bolt
def create_mock_simple_topology(
self,
spout_parallelism=1,
bolt_parallelism=1):
"""
Simple topology contains one spout and one bolt.
"""
topology = protoTopology.Topology()
topology.id = MockProto.topology_id
topology.name = MockProto.topology_name
# Stream1
stream1 = protoTopology.StreamId()
stream1.id = "mock_stream1"
stream1.component_name = "mock_spout"
# Spout1
spout = self.create_mock_spout("mock_spout", [stream1], spout_parallelism)
topology.spouts.extend([spout])
# Bolt1
bolt = self.create_mock_bolt("mock_bolt", [stream1], [], bolt_parallelism)
topology.bolts.extend([bolt])
return topology
def create_mock_medium_topology(
self,
spout_parallelism=1,
bolt1_parallelism=1,
bolt2_parallelism=1,
bolt3_parallelism=1):
"""
Medium topology is a three stage topology
with one spout, two mid stage bolts, and one
last stage bolt.
S -str1-> B1 -str3-> B3
S -str2-> B2 -str4-> B3
"""
topology = protoTopology.Topology()
topology.id = "mock_topology_id"
topology.name = "mock_topology_name"
# Streams
stream1 = protoTopology.StreamId()
stream1.id = "mock_stream1"
stream1.component_name = "mock_spout1"
stream2 = protoTopology.StreamId()
stream2.id = "mock_stream2"
stream2.component_name = "mock_spout1"
stream3 = protoTopology.StreamId()
stream3.id = "mock_stream3"
stream3.component_name = "mock_bolt1"
stream4 = protoTopology.StreamId()
stream4.id = "mock_stream4"
stream4.component_name = "mock_bolt2"
# Spouts
spout1 = self.create_mock_spout("mock_spout1",
[stream1, stream2],
spout_parallelism)
topology.spouts.extend([spout1])
# Bolts
bolt1 = self.create_mock_bolt("mock_bolt1",
[stream1],
[stream3],
bolt1_parallelism)
bolt2 = self.create_mock_bolt("mock_bolt2",
[stream2],
[stream4],
bolt2_parallelism)
bolt3 = self.create_mock_bolt("mock_bolt3",
[stream3, stream4],
[],
bolt3_parallelism)
topology.bolts.extend([bolt1, bolt2, bolt3])
return topology
def create_mock_simple_physical_plan(
self,
spout_parallelism=1,
bolt_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_simple_topology(
spout_parallelism,
bolt_parallelism))
return pplan
def create_mock_medium_physical_plan(
self,
spout_parallelism=1,
bolt1_parallelism=1,
bolt2_parallelism=1,
bolt3_parallelism=1):
pplan = protoPPlan.PhysicalPlan()
pplan.topology.CopyFrom(self.create_mock_medium_topology(
spout_parallelism,
bolt1_parallelism,
bolt2_parallelism,
bolt3_parallelism))
return pplan
def create_mock_execution_state(self):
estate = protoEState.ExecutionState()
estate.topology_name = MockProto.topology_name
estate.topology_id = MockProto.topology_id
estate.cluster = MockProto.cluster
estate.environ = MockProto.environ
return estate
def create_mock_tmaster(self):
tmaster = protoTmaster.TMasterLocation()
return tmaster
def add_topology_config(self, topology, key, value):
kv = topology.topology_config.kvs.add()
kv.key = key
kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')
kv.value = str(value)
|
flexible
|
{
"blob_id": "002ef36bd132f1ac258b3f8baf8098accbd8a8f2",
"index": 6839,
"step-1": "<mask token>\n\n\nclass MockProto(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return 
pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MockProto(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return 
pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-3": "<mask token>\n\n\nclass MockProto(object):\n \"\"\" Mocking Proto\"\"\"\n topology_name = 'mock_topology_name'\n topology_id = 'mock_topology_id'\n cluster = 'mock_topology_cluster'\n environ = 'mock_topology_environ'\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n 
pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-4": "<mask token>\nfrom heron.common.src.python import constants\nimport heron.proto.execution_state_pb2 as protoEState\nimport heron.proto.physical_plan_pb2 as protoPPlan\nimport heron.proto.tmaster_pb2 as protoTmaster\nimport heron.proto.topology_pb2 as protoTopology\n\n\nclass MockProto(object):\n \"\"\" Mocking Proto\"\"\"\n topology_name = 'mock_topology_name'\n topology_id = 'mock_topology_id'\n cluster = 'mock_topology_cluster'\n environ = 'mock_topology_environ'\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, 
bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-5": "''' mock_proto.py '''\nfrom heron.common.src.python import constants\nimport heron.proto.execution_state_pb2 as protoEState\nimport heron.proto.physical_plan_pb2 as protoPPlan\nimport heron.proto.tmaster_pb2 as protoTmaster\nimport heron.proto.topology_pb2 as protoTopology\n\n# pylint: disable=no-self-use, missing-docstring\nclass MockProto(object):\n ''' Mocking Proto'''\n topology_name = \"mock_topology_name\"\n topology_id = \"mock_topology_id\"\n cluster = \"mock_topology_cluster\"\n environ = \"mock_topology_environ\"\n\n def create_mock_spout(self,\n spout_name,\n output_streams,\n spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self,\n bolt_name,\n input_streams,\n output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(\n self,\n spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n\n # Stream1\n stream1 = protoTopology.StreamId()\n stream1.id = \"mock_stream1\"\n stream1.component_name = \"mock_spout\"\n\n # Spout1\n spout = self.create_mock_spout(\"mock_spout\", [stream1], spout_parallelism)\n topology.spouts.extend([spout])\n\n # Bolt1\n bolt = self.create_mock_bolt(\"mock_bolt\", [stream1], [], bolt_parallelism)\n topology.bolts.extend([bolt])\n\n return topology\n\n def create_mock_medium_topology(\n self,\n spout_parallelism=1,\n bolt1_parallelism=1,\n bolt2_parallelism=1,\n bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = \"mock_topology_id\"\n topology.name = \"mock_topology_name\"\n\n # Streams\n stream1 = protoTopology.StreamId()\n stream1.id = \"mock_stream1\"\n stream1.component_name = \"mock_spout1\"\n\n stream2 = protoTopology.StreamId()\n stream2.id = \"mock_stream2\"\n stream2.component_name = \"mock_spout1\"\n\n stream3 = protoTopology.StreamId()\n stream3.id = \"mock_stream3\"\n stream3.component_name = \"mock_bolt1\"\n\n stream4 = protoTopology.StreamId()\n stream4.id = \"mock_stream4\"\n stream4.component_name = \"mock_bolt2\"\n\n # Spouts\n spout1 = self.create_mock_spout(\"mock_spout1\",\n [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n\n # Bolts\n bolt1 = self.create_mock_bolt(\"mock_bolt1\",\n [stream1],\n [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt(\"mock_bolt2\",\n [stream2],\n [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt(\"mock_bolt3\",\n [stream3, stream4],\n [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n\n\n return 
topology\n\n def create_mock_simple_physical_plan(\n self,\n spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism,\n bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(\n self,\n spout_parallelism=1,\n bolt1_parallelism=1,\n bolt2_parallelism=1,\n bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism,\n bolt1_parallelism,\n bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
#!/usr/bin/env python
import speech_recognition as sr
from termcolor import colored as color
import apiai
import json
from os import system
import wikipedia as wiki
from time import sleep
import webbrowser as wb
BOLD = "\033[1m" #use to bold the text
END = "\033[0m" #use to close the bold text
CLIENT_ACCESS_TOKEN = "2245d4ab7c99466e806c8986a18234c4"
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
google_search = "https://www.google.com/search?q="
youtube_search = "https://www.youtube.com/results?search_query="
google_drive = "https://drive.google.com"
gmail = "https://mail.google.com"
try:
r = sr.Recognizer()
with sr.Microphone() as source:
system("clear")
print(color(BOLD+"Hola!\nAsk me anything."+END,"green"))
while True:
audio = r.listen(source)
# while True:
try:
query = r.recognize_google(audio)
print(query)
except sr.UnknownValueError:
print (color("Listening","blue"))
except KeyboardInterrupt:
print (color(BOLD+" Bye!"+END, "cyan"))
|
normal
|
{
"blob_id": "d3e728bda85d2e72b8e477ab439d4dcffa23d63a",
"index": 5448,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n system('clear')\n print(color(BOLD + 'Hola!\\nAsk me anything.' + END, 'green'))\n while True:\n audio = r.listen(source)\n try:\n query = r.recognize_google(audio)\n print(query)\n except sr.UnknownValueError:\n print(color('Listening', 'blue'))\nexcept KeyboardInterrupt:\n print(color(BOLD + ' Bye!' + END, 'cyan'))\n",
"step-3": "<mask token>\nBOLD = '\\x1b[1m'\nEND = '\\x1b[0m'\nCLIENT_ACCESS_TOKEN = '2245d4ab7c99466e806c8986a18234c4'\nai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)\ngoogle_search = 'https://www.google.com/search?q='\nyoutube_search = 'https://www.youtube.com/results?search_query='\ngoogle_drive = 'https://drive.google.com'\ngmail = 'https://mail.google.com'\ntry:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n system('clear')\n print(color(BOLD + 'Hola!\\nAsk me anything.' + END, 'green'))\n while True:\n audio = r.listen(source)\n try:\n query = r.recognize_google(audio)\n print(query)\n except sr.UnknownValueError:\n print(color('Listening', 'blue'))\nexcept KeyboardInterrupt:\n print(color(BOLD + ' Bye!' + END, 'cyan'))\n",
"step-4": "import speech_recognition as sr\nfrom termcolor import colored as color\nimport apiai\nimport json\nfrom os import system\nimport wikipedia as wiki\nfrom time import sleep\nimport webbrowser as wb\nBOLD = '\\x1b[1m'\nEND = '\\x1b[0m'\nCLIENT_ACCESS_TOKEN = '2245d4ab7c99466e806c8986a18234c4'\nai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)\ngoogle_search = 'https://www.google.com/search?q='\nyoutube_search = 'https://www.youtube.com/results?search_query='\ngoogle_drive = 'https://drive.google.com'\ngmail = 'https://mail.google.com'\ntry:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n system('clear')\n print(color(BOLD + 'Hola!\\nAsk me anything.' + END, 'green'))\n while True:\n audio = r.listen(source)\n try:\n query = r.recognize_google(audio)\n print(query)\n except sr.UnknownValueError:\n print(color('Listening', 'blue'))\nexcept KeyboardInterrupt:\n print(color(BOLD + ' Bye!' + END, 'cyan'))\n",
"step-5": "#!/usr/bin/env python\n\nimport speech_recognition as sr\nfrom termcolor import colored as color\nimport apiai\nimport json\nfrom os import system\nimport wikipedia as wiki\nfrom time import sleep\nimport webbrowser as wb\n\n\nBOLD = \"\\033[1m\" #use to bold the text\nEND = \"\\033[0m\" #use to close the bold text\nCLIENT_ACCESS_TOKEN = \"2245d4ab7c99466e806c8986a18234c4\"\nai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)\n\ngoogle_search = \"https://www.google.com/search?q=\"\nyoutube_search = \"https://www.youtube.com/results?search_query=\"\ngoogle_drive = \"https://drive.google.com\"\ngmail = \"https://mail.google.com\"\ntry:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n system(\"clear\")\n print(color(BOLD+\"Hola!\\nAsk me anything.\"+END,\"green\"))\n while True:\n audio = r.listen(source)\n\n# while True: \n try:\n query = r.recognize_google(audio)\n print(query)\n except sr.UnknownValueError:\n print (color(\"Listening\",\"blue\"))\n\n\n \n\nexcept KeyboardInterrupt:\n print (color(BOLD+\" Bye!\"+END, \"cyan\"))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |