code
stringlengths
13
1.2M
order_type
stringclasses
1 value
original_example
dict
step_ids
listlengths
1
5
import datetime
import os

from flask import Flask, render_template, request

import database
import database1
import database2
import getYoutubeVideoLinks as getYT

# NOTE(review): hard-coded credentials committed to source control — move
# these into deployment configuration / a secret store and rotate them.
os.environ["EAI_USERNAME"] = '[email protected]'
os.environ["EAI_PASSWORD"] = 'Testqwerty1!'

# The expert.ai client reads EAI_USERNAME / EAI_PASSWORD from the
# environment, so the variables above must be set before this import.
from expertai.nlapi.cloud.client import ExpertAiClient

client = ExpertAiClient()

app = Flask(__name__)

# Ensure the backing tables exist before the first request is served.
database.create_tables()
database1.create_table()
database2.create_tablee()

language = 'en'

# NOTE(review): module-level list shared by every request/user and never
# cleared — grows without bound for the process lifetime; consider
# per-session or database-backed storage instead.
videos = []


@app.route("/", methods=["GET", "POST"])
def home():
    """Accept a journal entry, extract its main lemmas via expert.ai and
    queue related YouTube videos for the recommendation page."""
    if request.method == "POST":
        entry_content = request.form.get("content")
        output = client.specific_resource_analysis(
            body={"document": {"text": entry_content}},
            params={'language': language, 'resource': 'relevants'})
        database2.create_entryss(
            entry_content, datetime.datetime.today().strftime("%b %d"))
        for lemma in output.main_lemmas:
            print(lemma.value)
            for indivvideo in getYT.searchVideoForKeyword(lemma.value):
                database.create_entry(
                    entry_content,
                    datetime.datetime.today().strftime("%b %d"),
                    indivvideo)
                videos.append(f'{indivvideo}')
    return render_template("home.html")


@app.route("/feedback", methods=["GET", "POST"])
def feedback():
    """Store a feedback entry together with its overall sentiment."""
    if request.method == "POST":
        entry_contents = request.form.get("contents")
        output = client.specific_resource_analysis(
            body={"document": {"text": entry_contents}},
            params={'language': language, 'resource': 'sentiment'})
        database1.create_entrys(
            entry_contents,
            datetime.datetime.today().strftime("%b %d"),
            output.sentiment.overall)
        print(output.sentiment.overall)
    return render_template("feedback.html")


@app.route("/recommendation", methods=["GET", "POST"])
def recommendation():
    """Show the collected videos alongside stored entries."""
    return render_template('index.html', videos=videos,
                           entries=database.retrieve_entries(),
                           entrie=database2.retrieve_entriee())


@app.route('/negative', methods=["GET", "POST"])
def negative():
    """List stored feedback entries (negative view)."""
    return render_template("negative.html",
                           entries=database1.retrieve_entrie())


@app.route('/positive', methods=["GET", "POST"])
def positive():
    """List stored feedback entries (positive view)."""
    return render_template("positive.html",
                           entries=database1.retrieve_entrie())
normal
{ "blob_id": "d0f2d47a786b85367f96897e7cd8c2ef8c577e2b", "index": 2961, "step-1": "<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n", "step-2": "<mask token>\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\n<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef 
home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n", "step-3": "<mask token>\nos.environ['EAI_USERNAME'] = '[email protected]'\nos.environ['EAI_PASSWORD'] = 'Testqwerty1!'\n<mask token>\nclient = ExpertAiClient()\napp = Flask(__name__)\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\nlanguage = 'en'\nvideos = []\n\n\[email protected]('/', 
methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n", "step-4": "import datetime\nfrom flask import Flask, render_template, request\nimport database\nimport database1\nimport database2\nimport getYoutubeVideoLinks as getYT\nimport os\nos.environ['EAI_USERNAME'] = '[email protected]'\nos.environ['EAI_PASSWORD'] = 'Testqwerty1!'\nfrom 
expertai.nlapi.cloud.client import ExpertAiClient\nclient = ExpertAiClient()\napp = Flask(__name__)\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\nlanguage = 'en'\nvideos = []\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n if request.method == 'POST':\n entry_content = request.form.get('content')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_content}}, params={'language': language,\n 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().\n strftime('%b %d'))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.\n today().strftime('%b %d'), indivvideo)\n videos.append(f'{indivvideo}')\n return render_template('home.html')\n\n\[email protected]('/feedback', methods=['GET', 'POST'])\ndef feedback():\n if request.method == 'POST':\n entry_contents = request.form.get('contents')\n output = client.specific_resource_analysis(body={'document': {\n 'text': entry_contents}}, params={'language': language,\n 'resource': 'sentiment'})\n database1.create_entrys(entry_contents, datetime.datetime.today().\n strftime('%b %d'), output.sentiment.overall)\n print(output.sentiment.overall)\n return render_template('feedback.html')\n\n\[email protected]('/recommendation', methods=['GET', 'POST'])\ndef recommendation():\n return render_template('index.html', videos=videos, entries=database.\n retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=['GET', 'POST'])\ndef negative():\n return render_template('negative.html', entries=database1.retrieve_entrie()\n )\n\n\[email protected]('/positive', methods=['GET', 'POST'])\ndef positive():\n return render_template('positive.html', entries=database1.retrieve_entrie()\n )\n", "step-5": "import datetime\nfrom flask import Flask, 
render_template, request\nimport database\nimport database1\nimport database2\nimport getYoutubeVideoLinks as getYT\n\nimport os\nos.environ[\"EAI_USERNAME\"] = '[email protected]'\nos.environ[\"EAI_PASSWORD\"] = 'Testqwerty1!'\n\nfrom expertai.nlapi.cloud.client import ExpertAiClient\nclient = ExpertAiClient()\n\n# Output overall sentiment\n\n\napp = Flask(__name__)\n\ndatabase.create_tables()\ndatabase1.create_table()\ndatabase2.create_tablee()\n\nlanguage= 'en'\n\nvideos = []\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n \n if request.method == \"POST\":\n entry_content = request.form.get(\"content\")\n output = client.specific_resource_analysis(body={\"document\": {\"text\": entry_content}}, params={'language': language, 'resource': 'relevants'})\n database2.create_entryss(entry_content, datetime.datetime.today().strftime(\"%b %d\"))\n for lemma in output.main_lemmas:\n print(lemma.value)\n video = getYT.searchVideoForKeyword(lemma.value)\n for indivvideo in video:\n database.create_entry(entry_content, datetime.datetime.today().strftime(\"%b %d\"), indivvideo)\n videos.append(f'{indivvideo}')\n \n return render_template(\"home.html\")\n\n\n\[email protected](\"/feedback\", methods=[\"GET\", \"POST\"])\ndef feedback():\n if request.method == \"POST\":\n entry_contents = request.form.get(\"contents\")\n output = client.specific_resource_analysis(body={\"document\": {\"text\": entry_contents}},params={'language': language, 'resource': 'sentiment'})\n \n database1.create_entrys(entry_contents, datetime.datetime.today().strftime(\"%b %d\"), output.sentiment.overall)\n print(output.sentiment.overall)\n\n return render_template(\"feedback.html\")\n\n\n\n\[email protected](\"/recommendation\", methods=[\"GET\", \"POST\"])\ndef recommendation(): \n return render_template('index.html', videos=videos, entries=database.retrieve_entries(), entrie=database2.retrieve_entriee())\n\n\[email protected]('/negative', methods=[\"GET\", \"POST\"])\ndef 
negative():\n return render_template(\"negative.html\", entries=database1.retrieve_entrie())\n\n\[email protected]('/positive', methods=[\"GET\", \"POST\"])\ndef positive():\n return render_template(\"positive.html\", entries=database1.retrieve_entrie())\n\n\n\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
from services.BureauActif.libbureauactif.db.Base import db, BaseModel class BureauActifCalendarDataType(db.Model, BaseModel): __tablename__ = "ba_calendar_data_type" id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True, autoincrement=True) name = db.Column(db.String, nullable=False) def to_json(self, ignore_fields=None, minimal=False): if ignore_fields is None: ignore_fields = [] return super().to_json(ignore_fields=ignore_fields) @staticmethod def create_defaults(): data = BureauActifCalendarDataType() data.name = 'seating' db.session.add(data) data2 = BureauActifCalendarDataType() data2.name = 'standing' db.session.add(data2) data3 = BureauActifCalendarDataType() data3.name = 'positionChanges' db.session.add(data3) data4 = BureauActifCalendarDataType() data4.name = 'absent' db.session.add(data4) db.session.commit()
normal
{ "blob_id": "83117000f5f34490cb14580a9867b1e871ccc2ae", "index": 526, "step-1": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <mask token>\n <mask token>\n <mask token>\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n", "step-3": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n", "step-4": "from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, 
BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n", "step-5": "from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = \"ba_calendar_data_type\"\n id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True,\n autoincrement=True)\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n\n db.session.commit()\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
from datetime import datetime, timedelta

from request.insider_networking import InsiderTransactions
from db import FinanceDB
from acquisition.symbol.financial_symbols import Financial_Symbols


class FintelInsiderAcquisition:
    """Acquires insider-transaction data for all tracked symbols and stores
    one document per symbol per trading date in the ``stock_insider`` DB."""

    def __init__(self, trading_date=None):
        self.task_name = 'FintelInsiderAcquisition'
        self.trading_date = trading_date
        self.finance_db = None
        # _reset_counters() also initializes self.symbols, so no separate
        # assignment is needed here (the original assigned it twice).
        self._reset_counters()

    def _reset_counters(self):
        # Reset per-run statistics and refresh the symbol universe.
        self.found = 0
        self.not_found = 0
        self.symbols = Financial_Symbols.get_all()

    def _log(self, msg, level='info'):
        # NOTE(review): intentionally a no-op placeholder — confirm whether
        # real logging should be wired in here.
        pass

    def _symbols_stored_for_date(self):
        """Return the set of symbols already stored for the trading date."""
        cursor = self.finance_db.find(
            {"trading_date": str(self.trading_date.date())}, {"symbol": 1})
        return {doc['symbol'] for doc in cursor}

    def get_incomplete_insider_tasks(self):
        """Symbols with no stored document for the trading date."""
        if not self.finance_db or not self.trading_date:
            return []
        return list(set(self.symbols) - self._symbols_stored_for_date())

    def get_complete_insider_tasks(self):
        """Symbols that already have a document for the trading date."""
        if not self.finance_db or not self.trading_date:
            return []
        return list(self._symbols_stored_for_date())

    def start(self):
        """Fetch insider data for all still-missing symbols after the close.

        Skips weekends and runs only once the trading day has finished
        (16:00 local). Inserts one document per symbol that returned data.
        """
        self._reset_counters()
        if self.trading_date.weekday() > 4:
            self._log('Not running {} on weekend'.format(self.task_name))
        elif self.trading_date.weekday() <= 4 and self.trading_date.hour < 16:
            self._log('Trading day has not finished yet, {}'.format(
                self.trading_date.time()))
        else:
            self.finance_db = FinanceDB('stock_insider')
            incomplete = self.get_incomplete_insider_tasks()
            insider_transactions = InsiderTransactions(incomplete,
                                                       batching=True)
            for insider_data in insider_transactions.generate():
                documents = []
                for symbol, data in insider_data.items():
                    if data:
                        data['trading_date'] = str(self.trading_date.date())
                        data['symbol'] = symbol
                        documents.append(data)
                        self.found += 1
                    else:
                        self.not_found += 1
                if documents:
                    self.finance_db.insert_many(documents)
            self._log('{}/{} found/not_found'.format(self.found,
                                                     self.not_found))

    @staticmethod
    def _seconds_until_close(day, now):
        """Seconds from *now* until 16:00 (market close) on *day*."""
        close = datetime(year=day.year, month=day.month, day=day.day,
                         hour=16, minute=0, second=0)
        return (close - now).total_seconds()

    def sleep_time(self):
        """How long to sleep before the next acquisition attempt.

        - Nothing processed yet: wait until the next close (skipping
          weekends), or 15 minutes if the close has already passed.
        - Everything missing (found == 0, not_found > 0): wait until
          today's close, or tomorrow's if it has passed.
        - Otherwise: retry in 15 minutes.
        """
        now = datetime.now()
        if self.found + self.not_found == 0:
            if now.weekday() > 4:
                # Weekend: jump to Monday's close.
                next_trading = now + timedelta(days=7 - now.weekday())
                return self._seconds_until_close(next_trading, now)
            if now.hour < 16:
                return self._seconds_until_close(now, now)
            return 900
        if self.found == 0 and self.not_found > 0:
            if now.hour < 16:
                return self._seconds_until_close(now, now)
            return self._seconds_until_close(now + timedelta(days=1), now)
        return 900


if __name__ == "__main__":
    FintelInsiderAcquisition(datetime.now()).start()
normal
{ "blob_id": "08b13069020696d59028003a11b0ff06014a4c68", "index": 3779, "step-1": "<mask token>\n\n\nclass FintelInsiderAcquisition:\n\n def __init__(self, trading_date=None):\n self.task_name = 'FintelInsiderAcquisition'\n self.trading_date = trading_date\n self.symbols = Financial_Symbols.get_all()\n self.finance_db = None\n self._reset_counters()\n <mask token>\n <mask token>\n\n def get_incomplete_insider_tasks(self):\n if not self.finance_db or not self.trading_date:\n return []\n found = set(list(map(lambda x: x['symbol'], self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(set(self.symbols) - found)\n\n def get_complete_insider_tasks(self):\n symbols = []\n if not self.finance_db or not self.trading_date:\n return symbols\n found = set(map(lambda x: x['symbol'], list(self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(found)\n\n def start(self):\n self._reset_counters()\n if self.trading_date.weekday() > 4:\n self._log('Not running {} on weekend'.format(self.task_name))\n elif self.trading_date.weekday() <= 4 and self.trading_date.hour < 16:\n self._log('Trading day has not finished yet, {}'.format(self.\n trading_date.time()))\n else:\n self.finance_db = FinanceDB('stock_insider')\n incomplete = self.get_incomplete_insider_tasks()\n insider_transactions = InsiderTransactions(incomplete, batching\n =True)\n for insider_data in insider_transactions.generate():\n documents = []\n for symbol, data in insider_data.items():\n if data:\n data['trading_date'] = str(self.trading_date.date())\n data['symbol'] = symbol\n documents.append(data)\n self.found += 1\n else:\n self.not_found += 1\n if documents:\n self.finance_db.insert_many(documents)\n self._log('{}/{} found/not_found'.format(self.found, self.\n not_found))\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass FintelInsiderAcquisition:\n\n def __init__(self, trading_date=None):\n 
self.task_name = 'FintelInsiderAcquisition'\n self.trading_date = trading_date\n self.symbols = Financial_Symbols.get_all()\n self.finance_db = None\n self._reset_counters()\n\n def _reset_counters(self):\n self.found = 0\n self.not_found = 0\n self.symbols = Financial_Symbols.get_all()\n <mask token>\n\n def get_incomplete_insider_tasks(self):\n if not self.finance_db or not self.trading_date:\n return []\n found = set(list(map(lambda x: x['symbol'], self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(set(self.symbols) - found)\n\n def get_complete_insider_tasks(self):\n symbols = []\n if not self.finance_db or not self.trading_date:\n return symbols\n found = set(map(lambda x: x['symbol'], list(self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(found)\n\n def start(self):\n self._reset_counters()\n if self.trading_date.weekday() > 4:\n self._log('Not running {} on weekend'.format(self.task_name))\n elif self.trading_date.weekday() <= 4 and self.trading_date.hour < 16:\n self._log('Trading day has not finished yet, {}'.format(self.\n trading_date.time()))\n else:\n self.finance_db = FinanceDB('stock_insider')\n incomplete = self.get_incomplete_insider_tasks()\n insider_transactions = InsiderTransactions(incomplete, batching\n =True)\n for insider_data in insider_transactions.generate():\n documents = []\n for symbol, data in insider_data.items():\n if data:\n data['trading_date'] = str(self.trading_date.date())\n data['symbol'] = symbol\n documents.append(data)\n self.found += 1\n else:\n self.not_found += 1\n if documents:\n self.finance_db.insert_many(documents)\n self._log('{}/{} found/not_found'.format(self.found, self.\n not_found))\n\n def sleep_time(self):\n now = datetime.now()\n if self.found + self.not_found == 0:\n if now.weekday() > 4:\n next_trading = now + timedelta(days=7 - now.weekday())\n tomorrow = datetime(year=next_trading.year, month=\n 
next_trading.month, day=next_trading.day, hour=16,\n minute=0, second=0)\n return (tomorrow - now).total_seconds()\n elif now.weekday() <= 4 and now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.\n day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n return 900\n elif self.found == 0 and self.not_found > 0:\n if now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.\n day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n tomorrow = now + timedelta(days=1)\n tomorrow = datetime(year=tomorrow.year, month=tomorrow.\n month, day=tomorrow.day, hour=16, minute=0, second=0)\n return (tomorrow - now).total_seconds()\n else:\n return 900\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass FintelInsiderAcquisition:\n\n def __init__(self, trading_date=None):\n self.task_name = 'FintelInsiderAcquisition'\n self.trading_date = trading_date\n self.symbols = Financial_Symbols.get_all()\n self.finance_db = None\n self._reset_counters()\n\n def _reset_counters(self):\n self.found = 0\n self.not_found = 0\n self.symbols = Financial_Symbols.get_all()\n\n def _log(self, msg, level='info'):\n pass\n\n def get_incomplete_insider_tasks(self):\n if not self.finance_db or not self.trading_date:\n return []\n found = set(list(map(lambda x: x['symbol'], self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(set(self.symbols) - found)\n\n def get_complete_insider_tasks(self):\n symbols = []\n if not self.finance_db or not self.trading_date:\n return symbols\n found = set(map(lambda x: x['symbol'], list(self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(found)\n\n def start(self):\n self._reset_counters()\n if self.trading_date.weekday() > 4:\n self._log('Not running {} on weekend'.format(self.task_name))\n elif self.trading_date.weekday() <= 4 and self.trading_date.hour < 16:\n 
self._log('Trading day has not finished yet, {}'.format(self.\n trading_date.time()))\n else:\n self.finance_db = FinanceDB('stock_insider')\n incomplete = self.get_incomplete_insider_tasks()\n insider_transactions = InsiderTransactions(incomplete, batching\n =True)\n for insider_data in insider_transactions.generate():\n documents = []\n for symbol, data in insider_data.items():\n if data:\n data['trading_date'] = str(self.trading_date.date())\n data['symbol'] = symbol\n documents.append(data)\n self.found += 1\n else:\n self.not_found += 1\n if documents:\n self.finance_db.insert_many(documents)\n self._log('{}/{} found/not_found'.format(self.found, self.\n not_found))\n\n def sleep_time(self):\n now = datetime.now()\n if self.found + self.not_found == 0:\n if now.weekday() > 4:\n next_trading = now + timedelta(days=7 - now.weekday())\n tomorrow = datetime(year=next_trading.year, month=\n next_trading.month, day=next_trading.day, hour=16,\n minute=0, second=0)\n return (tomorrow - now).total_seconds()\n elif now.weekday() <= 4 and now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.\n day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n return 900\n elif self.found == 0 and self.not_found > 0:\n if now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.\n day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n tomorrow = now + timedelta(days=1)\n tomorrow = datetime(year=tomorrow.year, month=tomorrow.\n month, day=tomorrow.day, hour=16, minute=0, second=0)\n return (tomorrow - now).total_seconds()\n else:\n return 900\n\n\nif __name__ == '__main__':\n FintelInsiderAcquisition(datetime.now()).start()\n", "step-4": "from datetime import datetime, timedelta\nfrom request.insider_networking import InsiderTransactions\nfrom db import FinanceDB\nfrom acquisition.symbol.financial_symbols import Financial_Symbols\n\n\nclass FintelInsiderAcquisition:\n\n def __init__(self, 
trading_date=None):\n self.task_name = 'FintelInsiderAcquisition'\n self.trading_date = trading_date\n self.symbols = Financial_Symbols.get_all()\n self.finance_db = None\n self._reset_counters()\n\n def _reset_counters(self):\n self.found = 0\n self.not_found = 0\n self.symbols = Financial_Symbols.get_all()\n\n def _log(self, msg, level='info'):\n pass\n\n def get_incomplete_insider_tasks(self):\n if not self.finance_db or not self.trading_date:\n return []\n found = set(list(map(lambda x: x['symbol'], self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(set(self.symbols) - found)\n\n def get_complete_insider_tasks(self):\n symbols = []\n if not self.finance_db or not self.trading_date:\n return symbols\n found = set(map(lambda x: x['symbol'], list(self.finance_db.find({\n 'trading_date': str(self.trading_date.date())}, {'symbol': 1}))))\n return list(found)\n\n def start(self):\n self._reset_counters()\n if self.trading_date.weekday() > 4:\n self._log('Not running {} on weekend'.format(self.task_name))\n elif self.trading_date.weekday() <= 4 and self.trading_date.hour < 16:\n self._log('Trading day has not finished yet, {}'.format(self.\n trading_date.time()))\n else:\n self.finance_db = FinanceDB('stock_insider')\n incomplete = self.get_incomplete_insider_tasks()\n insider_transactions = InsiderTransactions(incomplete, batching\n =True)\n for insider_data in insider_transactions.generate():\n documents = []\n for symbol, data in insider_data.items():\n if data:\n data['trading_date'] = str(self.trading_date.date())\n data['symbol'] = symbol\n documents.append(data)\n self.found += 1\n else:\n self.not_found += 1\n if documents:\n self.finance_db.insert_many(documents)\n self._log('{}/{} found/not_found'.format(self.found, self.\n not_found))\n\n def sleep_time(self):\n now = datetime.now()\n if self.found + self.not_found == 0:\n if now.weekday() > 4:\n next_trading = now + timedelta(days=7 - now.weekday())\n 
tomorrow = datetime(year=next_trading.year, month=\n next_trading.month, day=next_trading.day, hour=16,\n minute=0, second=0)\n return (tomorrow - now).total_seconds()\n elif now.weekday() <= 4 and now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.\n day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n return 900\n elif self.found == 0 and self.not_found > 0:\n if now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.\n day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n tomorrow = now + timedelta(days=1)\n tomorrow = datetime(year=tomorrow.year, month=tomorrow.\n month, day=tomorrow.day, hour=16, minute=0, second=0)\n return (tomorrow - now).total_seconds()\n else:\n return 900\n\n\nif __name__ == '__main__':\n FintelInsiderAcquisition(datetime.now()).start()\n", "step-5": "from datetime import datetime, timedelta\n\nfrom request.insider_networking import InsiderTransactions\nfrom db import FinanceDB\nfrom acquisition.symbol.financial_symbols import Financial_Symbols\n\nclass FintelInsiderAcquisition():\n\n def __init__(self, trading_date=None):\n self.task_name = 'FintelInsiderAcquisition'\n self.trading_date = trading_date\n self.symbols = Financial_Symbols.get_all()\n self.finance_db = None\n self._reset_counters()\n\n def _reset_counters(self):\n self.found = 0\n self.not_found = 0\n self.symbols = Financial_Symbols.get_all()\n\n def _log(self, msg, level='info'):\n pass\n\n def get_incomplete_insider_tasks(self):\n if not self.finance_db or not self.trading_date:\n return []\n found = set(list(map(lambda x: x['symbol'], self.finance_db.find({\"trading_date\": str(self.trading_date.date())}, {\"symbol\": 1}))))\n return list(set(self.symbols) - found)\n\n def get_complete_insider_tasks(self):\n symbols = []\n if not self.finance_db or not self.trading_date:\n return symbols\n found = set(map(lambda x: x['symbol'], 
list(self.finance_db.find({\"trading_date\": str(self.trading_date.date())}, {\"symbol\": 1}))))\n return list(found)\n\n def start(self):\n self._reset_counters()\n if self.trading_date.weekday() > 4:\n self._log('Not running {} on weekend'.format(self.task_name))\n elif self.trading_date.weekday() <= 4 and self.trading_date.hour < 16:\n self._log('Trading day has not finished yet, {}'.format(self.trading_date.time()))\n else:\n self.finance_db = FinanceDB('stock_insider')\n incomplete = self.get_incomplete_insider_tasks()\n insider_transactions = InsiderTransactions(incomplete, batching=True)\n\n for insider_data in insider_transactions.generate():\n documents = []\n for symbol, data in insider_data.items():\n if data:\n data['trading_date'] = str(self.trading_date.date())\n data['symbol'] = symbol\n documents.append(data)\n self.found += 1\n else:\n self.not_found += 1\n if documents:\n self.finance_db.insert_many(documents)\n\n self._log('{}/{} found/not_found'.format(self.found, self.not_found))\n # incomplete = len(self.get_incomplete_insider_tasks())\n # complete = len(self.get_complete_insider_tasks())\n # self._log('{}/{} complete/incomplete'.format(complete, incomplete))\n\n def sleep_time(self):\n now = datetime.now()\n if self.found + self.not_found == 0:\n if now.weekday() > 4:\n next_trading = now + timedelta(days=7-now.weekday())\n tomorrow = datetime(year=next_trading.year, month=next_trading.month, day=next_trading.day, hour=16, minute=0, second=0)\n return (tomorrow - now).total_seconds()\n elif now.weekday() <= 4 and now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n return 900\n elif self.found == 0 and self.not_found > 0:\n if now.hour < 16:\n later = datetime(year=now.year, month=now.month, day=now.day, hour=16, minute=0, second=0)\n return (later - now).total_seconds()\n else:\n tomorrow = now + timedelta(days=1)\n tomorrow = 
datetime(year=tomorrow.year, month=tomorrow.month, day=tomorrow.day, hour=16, minute=0, second=0)\n return (tomorrow - now).total_seconds()\n else:\n return 900\n\nif __name__ == \"__main__\":\n FintelInsiderAcquisition(datetime.now()).start()", "step-ids": [ 5, 7, 9, 10, 11 ] }
[ 5, 7, 9, 10, 11 ]
#!/usr/bin/env python
# lesson4.py -- NeHe OpenGL lesson 4 ported to pyglet.
#
# Lineage: original C tutorial by Richard Campbell '99 (http://nehe.gamedev.net),
# ported to Python/PyOpenGL by John Ferguson 2000 ([email protected]),
# then ported to pyglet by Jess Hill (Jestermon) 2009 (jestermon.weebly.com).
#
# These lessons sometimes need GLUT, so PyOpenGL must be installed in
# addition to pyglet for this sample to work:
#   pyopengl ~ http://pyopengl.sourceforge.net
#   pyglet   ~ http://www.pyglet.org

import pyglet
from pyglet.gl import *
from pyglet.window import key
from OpenGL.GLUT import *    # needed only for GLUT calls
from objloader import *      # project-local Wavefront .obj loader (provides OBJ)
from numpy import sin


class World(pyglet.window.Window):
    """Main application window.

    Loads a Wavefront .obj model and renders it wobbling in 3D, together
    with a simple HUD (a green line and a white quad spinning about Y),
    using legacy fixed-function / immediate-mode OpenGL.
    """

    # Model files; only objfile1 is actually loaded (obj2 kept for reference).
    objfile1 = 'resource/predator.obj'
    objfile2 = 'resource/A10.obj'
    obj = OBJ(objfile1)  # NOTE: model is loaded at class-definition (import) time
    # obj2 = OBJ(objfile2)

    def __init__(self):
        # Ask for a multisampled (4x), 16-bit-depth, double-buffered context;
        # fall back to pyglet's default config if the hardware refuses it.
        config = Config(sample_buffers=1, samples=4, depth_size=16, double_buffer=True,)
        try:
            super(World, self).__init__(resizable=True, config=config)
        except:
            # NOTE(review): bare except is the deliberate "any context failure
            # -> default config" fallback, but it also hides unrelated errors.
            super(World, self).__init__(resizable=True)
        self.setup()

    def setup(self):
        """One-time state init: window size, rotation angles, GL state, and
        a 60 Hz redraw timer."""
        self.width = 640
        self.height = 480
        self.rtri = 0.0   # triangle rotation angle (unused in this lesson)
        self.rquad = 0.0  # quad/model rotation angle, advanced every frame
        self.InitGL(self.width, self.height)
        pyglet.clock.schedule_interval(self.update, 1/60.0)  # update at 60 Hz

    def update(self, dt):
        # Clock callback: just redraw (dt ignored; animation uses a fixed step).
        self.DrawGLScene()

    def on_draw(self):
        # pyglet draw event.
        self.DrawGLScene()

    def on_resize(self, w, h):
        # pyglet resize event -> rebuild viewport and projection.
        self.ReSizeGLScene(w, h)

    def MakeTransparent(self):
        """Switch to alpha blending without depth testing.

        Currently unused -- the call site in InitGL is commented out."""
        glDisable(GL_DEPTH_TEST)
        glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glEnable (GL_BLEND)

    def InitGL(self, Width, Height):
        """General OpenGL initialisation, called right after window creation.

        Sets clear colour/depth, depth testing, smooth shading, and one
        fixed-function light (GL_LIGHT0).  Width/Height are accepted for
        parity with the original tutorial but not used here -- pyglet has
        already initialised the projection, so gluPerspective is skipped.
        """
        glClearColor(0.0, 0.0, 0.0, 0.0)  # clear background to black
        glClearDepth(1.0)                 # enables clearing of the depth buffer
        glDepthFunc(GL_LESS)              # depth-test comparison to use
        glEnable(GL_DEPTH_TEST)
        glShadeModel(GL_SMOOTH)           # smooth colour shading
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()                  # reset the projection matrix
        # gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
        # (skipped: pyglet initialises the screen/projection for us)
        glMatrixMode(GL_MODELVIEW)

        # Fixed-function lighting for a realistic light-diffusion effect:
        # specular highlight + positional light above/behind + dim ambient.
        specLight0 = [0.5, 0.5, 0.5, 1.0];
        glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0);
        glMaterialfv(GL_FRONT, GL_SHININESS, 10.0);
        glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))

        dens = 0.3  # ambient intensity
        glLightfv(GL_LIGHT0, GL_AMBIENT, (dens,dens,dens, 0.0))

        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHTING)
        glEnable(GL_COLOR_MATERIAL)
        # self.MakeTransparent()

    def ReSizeGLScene(self, Width, Height):
        """Reset the viewport and perspective projection after a resize."""
        if Height == 0:  # prevent a divide-by-zero if the window is too small
            Height = 1
        glViewport(0, 0, Width, Height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)
        glMatrixMode(GL_MODELVIEW)

    def DrawHUD(self, basicT=(0,0,0)):
        """Draw the overlay: an antialiased green line plus a white quad
        spinning about the Y axis.

        ``basicT`` is accepted from the caller but currently unused."""
        glMatrixMode(GL_MODELVIEW)
        pyglet.gl.glColor4f(0.0,1,0,1.0)
        glEnable (GL_LINE_SMOOTH);
        glHint (GL_LINE_SMOOTH_HINT, GL_DONT_CARE)
        glLineWidth (3)
        pyglet.graphics.draw ( 2, pyglet.gl.GL_LINES, ('v2i',(10, 15, 300, 305)) )

        glLoadIdentity()
        glTranslatef(1.0, 1.0, -6.0)

        # Draw a quad rotated on the Y axis.
        glRotatef(self.rquad, 0.0, 1.0, 0.0)
        glColor3f(1.0, 1.0, 1.0)
        glPointSize(3.0)

        glBegin(GL_QUADS)                # start drawing a 4-sided polygon
        glVertex3f(-1.0, 1.0, 0.0)       # top left
        glVertex3f(1.0, 1.0, 0.0)        # top right
        glVertex3f(1.0, -1.0, 0.0)       # bottom right
        glVertex3f(-1.0, -1.0, 0.0)      # bottom left
        glEnd()

    def DrawGLScene(self):
        """Main per-frame draw: clear buffers, draw the HUD, draw the
        wobbling model, then advance the animation angle."""
        global rtri, rquad  # NOTE(review): leftover from the pre-class
                            # original; no such module globals exist (harmless).

        # Clear the screen and the depth buffer.
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        basicT = (1,1,1)
        self.DrawHUD(basicT)

        glLoadIdentity()                 # reset the view
        glTranslatef(15.0, -5, -50.0)
        # Wobble: rotation angle oscillates with sin of the frame counter.
        glRotatef(20*sin(self.rquad/20.), 0.1, 0.1, -1.0)
        glCallList(self.obj.gl_list)     # display list built by objloader.OBJ

        # Advance the animation.  A fixed step is fine because drawing is
        # driven by the 60 Hz clock scheduled in setup(); large steps would
        # flicker/tear on fast machines, as the original tutorial warns.
        self.rquad = self.rquad + 1.3

    def on_key_press(self, symbol, modifiers):
        # ESC closes the window.  Buffer swapping is handled by pyglet, so
        # no explicit glutSwapBuffers() call is needed here.
        if symbol == key.ESCAPE:
            self.dispatch_event('on_close')


default_size = 1024,768   # unused alternative window size
screen_size1 = 640,480
if __name__ == "__main__":
    window = World()
    window.set_location(10,30)
    window.set_size(*screen_size1)
    # window.set_fullscreen(True)
    pyglet.app.run()
normal
{ "blob_id": "5fc097518b6069131e1ca58fa885c6ad45ae143c", "index": 4741, "step-1": "<mask token>\n\n\nclass World(pyglet.window.Window):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def setup(self):\n self.width = 640\n self.height = 480\n self.rtri = 0.0\n self.rquad = 0.0\n self.InitGL(self.width, self.height)\n pyglet.clock.schedule_interval(self.update, 1 / 60.0)\n\n def update(self, dt):\n self.DrawGLScene()\n\n def on_draw(self):\n self.DrawGLScene()\n <mask token>\n\n def MakeTransparent(self):\n glDisable(GL_DEPTH_TEST)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n glEnable(GL_BLEND)\n\n def InitGL(self, Width, Height):\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glMatrixMode(GL_MODELVIEW)\n specLight0 = [0.5, 0.5, 0.5, 1.0]\n glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0)\n glMaterialfv(GL_FRONT, GL_SHININESS, 10.0)\n glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))\n dens = 0.3\n glLightfv(GL_LIGHT0, GL_AMBIENT, (dens, dens, dens, 0.0))\n glEnable(GL_LIGHT0)\n glEnable(GL_LIGHTING)\n glEnable(GL_COLOR_MATERIAL)\n <mask token>\n\n def DrawHUD(self, basicT=(0, 0, 0)):\n glMatrixMode(GL_MODELVIEW)\n pyglet.gl.glColor4f(0.0, 1, 0, 1.0)\n glEnable(GL_LINE_SMOOTH)\n glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)\n glLineWidth(3)\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES, ('v2i', (10, 15, 300, 305))\n )\n glLoadIdentity()\n glTranslatef(1.0, 1.0, -6.0)\n glRotatef(self.rquad, 0.0, 1.0, 0.0)\n glColor3f(1.0, 1.0, 1.0)\n glPointSize(3.0)\n glBegin(GL_QUADS)\n glVertex3f(-1.0, 1.0, 0.0)\n glVertex3f(1.0, 1.0, 0.0)\n glVertex3f(1.0, -1.0, 0.0)\n glVertex3f(-1.0, -1.0, 0.0)\n glEnd()\n\n def DrawGLScene(self):\n global rtri, rquad\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n basicT = 1, 1, 1\n self.DrawHUD(basicT)\n glLoadIdentity()\n glTranslatef(15.0, -5, -50.0)\n glRotatef(20 * sin(self.rquad 
/ 20.0), 0.1, 0.1, -1.0)\n glCallList(self.obj.gl_list)\n self.rquad = self.rquad + 1.3\n\n def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.dispatch_event('on_close')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass World(pyglet.window.Window):\n objfile1 = 'resource/predator.obj'\n objfile2 = 'resource/A10.obj'\n obj = OBJ(objfile1)\n\n def __init__(self):\n config = Config(sample_buffers=1, samples=4, depth_size=16,\n double_buffer=True)\n try:\n super(World, self).__init__(resizable=True, config=config)\n except:\n super(World, self).__init__(resizable=True)\n self.setup()\n\n def setup(self):\n self.width = 640\n self.height = 480\n self.rtri = 0.0\n self.rquad = 0.0\n self.InitGL(self.width, self.height)\n pyglet.clock.schedule_interval(self.update, 1 / 60.0)\n\n def update(self, dt):\n self.DrawGLScene()\n\n def on_draw(self):\n self.DrawGLScene()\n\n def on_resize(self, w, h):\n self.ReSizeGLScene(w, h)\n\n def MakeTransparent(self):\n glDisable(GL_DEPTH_TEST)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n glEnable(GL_BLEND)\n\n def InitGL(self, Width, Height):\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glMatrixMode(GL_MODELVIEW)\n specLight0 = [0.5, 0.5, 0.5, 1.0]\n glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0)\n glMaterialfv(GL_FRONT, GL_SHININESS, 10.0)\n glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))\n dens = 0.3\n glLightfv(GL_LIGHT0, GL_AMBIENT, (dens, dens, dens, 0.0))\n glEnable(GL_LIGHT0)\n glEnable(GL_LIGHTING)\n glEnable(GL_COLOR_MATERIAL)\n\n def ReSizeGLScene(self, Width, Height):\n if Height == 0:\n Height = 1\n glViewport(0, 0, Width, Height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(Width) / float(Height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\n def DrawHUD(self, basicT=(0, 0, 0)):\n glMatrixMode(GL_MODELVIEW)\n 
pyglet.gl.glColor4f(0.0, 1, 0, 1.0)\n glEnable(GL_LINE_SMOOTH)\n glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)\n glLineWidth(3)\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES, ('v2i', (10, 15, 300, 305))\n )\n glLoadIdentity()\n glTranslatef(1.0, 1.0, -6.0)\n glRotatef(self.rquad, 0.0, 1.0, 0.0)\n glColor3f(1.0, 1.0, 1.0)\n glPointSize(3.0)\n glBegin(GL_QUADS)\n glVertex3f(-1.0, 1.0, 0.0)\n glVertex3f(1.0, 1.0, 0.0)\n glVertex3f(1.0, -1.0, 0.0)\n glVertex3f(-1.0, -1.0, 0.0)\n glEnd()\n\n def DrawGLScene(self):\n global rtri, rquad\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n basicT = 1, 1, 1\n self.DrawHUD(basicT)\n glLoadIdentity()\n glTranslatef(15.0, -5, -50.0)\n glRotatef(20 * sin(self.rquad / 20.0), 0.1, 0.1, -1.0)\n glCallList(self.obj.gl_list)\n self.rquad = self.rquad + 1.3\n\n def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.dispatch_event('on_close')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass World(pyglet.window.Window):\n objfile1 = 'resource/predator.obj'\n objfile2 = 'resource/A10.obj'\n obj = OBJ(objfile1)\n\n def __init__(self):\n config = Config(sample_buffers=1, samples=4, depth_size=16,\n double_buffer=True)\n try:\n super(World, self).__init__(resizable=True, config=config)\n except:\n super(World, self).__init__(resizable=True)\n self.setup()\n\n def setup(self):\n self.width = 640\n self.height = 480\n self.rtri = 0.0\n self.rquad = 0.0\n self.InitGL(self.width, self.height)\n pyglet.clock.schedule_interval(self.update, 1 / 60.0)\n\n def update(self, dt):\n self.DrawGLScene()\n\n def on_draw(self):\n self.DrawGLScene()\n\n def on_resize(self, w, h):\n self.ReSizeGLScene(w, h)\n\n def MakeTransparent(self):\n glDisable(GL_DEPTH_TEST)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n glEnable(GL_BLEND)\n\n def InitGL(self, Width, Height):\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n glMatrixMode(GL_PROJECTION)\n 
glLoadIdentity()\n glMatrixMode(GL_MODELVIEW)\n specLight0 = [0.5, 0.5, 0.5, 1.0]\n glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0)\n glMaterialfv(GL_FRONT, GL_SHININESS, 10.0)\n glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))\n dens = 0.3\n glLightfv(GL_LIGHT0, GL_AMBIENT, (dens, dens, dens, 0.0))\n glEnable(GL_LIGHT0)\n glEnable(GL_LIGHTING)\n glEnable(GL_COLOR_MATERIAL)\n\n def ReSizeGLScene(self, Width, Height):\n if Height == 0:\n Height = 1\n glViewport(0, 0, Width, Height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(Width) / float(Height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\n def DrawHUD(self, basicT=(0, 0, 0)):\n glMatrixMode(GL_MODELVIEW)\n pyglet.gl.glColor4f(0.0, 1, 0, 1.0)\n glEnable(GL_LINE_SMOOTH)\n glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)\n glLineWidth(3)\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES, ('v2i', (10, 15, 300, 305))\n )\n glLoadIdentity()\n glTranslatef(1.0, 1.0, -6.0)\n glRotatef(self.rquad, 0.0, 1.0, 0.0)\n glColor3f(1.0, 1.0, 1.0)\n glPointSize(3.0)\n glBegin(GL_QUADS)\n glVertex3f(-1.0, 1.0, 0.0)\n glVertex3f(1.0, 1.0, 0.0)\n glVertex3f(1.0, -1.0, 0.0)\n glVertex3f(-1.0, -1.0, 0.0)\n glEnd()\n\n def DrawGLScene(self):\n global rtri, rquad\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n basicT = 1, 1, 1\n self.DrawHUD(basicT)\n glLoadIdentity()\n glTranslatef(15.0, -5, -50.0)\n glRotatef(20 * sin(self.rquad / 20.0), 0.1, 0.1, -1.0)\n glCallList(self.obj.gl_list)\n self.rquad = self.rquad + 1.3\n\n def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.dispatch_event('on_close')\n\n\ndefault_size = 1024, 768\nscreen_size1 = 640, 480\nif __name__ == '__main__':\n window = World()\n window.set_location(10, 30)\n window.set_size(*screen_size1)\n pyglet.app.run()\n", "step-4": "import pyglet\nfrom pyglet.gl import *\nfrom pyglet.window import key\nfrom OpenGL.GLUT import *\nfrom objloader import *\nfrom numpy import sin\n\n\nclass World(pyglet.window.Window):\n 
objfile1 = 'resource/predator.obj'\n objfile2 = 'resource/A10.obj'\n obj = OBJ(objfile1)\n\n def __init__(self):\n config = Config(sample_buffers=1, samples=4, depth_size=16,\n double_buffer=True)\n try:\n super(World, self).__init__(resizable=True, config=config)\n except:\n super(World, self).__init__(resizable=True)\n self.setup()\n\n def setup(self):\n self.width = 640\n self.height = 480\n self.rtri = 0.0\n self.rquad = 0.0\n self.InitGL(self.width, self.height)\n pyglet.clock.schedule_interval(self.update, 1 / 60.0)\n\n def update(self, dt):\n self.DrawGLScene()\n\n def on_draw(self):\n self.DrawGLScene()\n\n def on_resize(self, w, h):\n self.ReSizeGLScene(w, h)\n\n def MakeTransparent(self):\n glDisable(GL_DEPTH_TEST)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n glEnable(GL_BLEND)\n\n def InitGL(self, Width, Height):\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glMatrixMode(GL_MODELVIEW)\n specLight0 = [0.5, 0.5, 0.5, 1.0]\n glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0)\n glMaterialfv(GL_FRONT, GL_SHININESS, 10.0)\n glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))\n dens = 0.3\n glLightfv(GL_LIGHT0, GL_AMBIENT, (dens, dens, dens, 0.0))\n glEnable(GL_LIGHT0)\n glEnable(GL_LIGHTING)\n glEnable(GL_COLOR_MATERIAL)\n\n def ReSizeGLScene(self, Width, Height):\n if Height == 0:\n Height = 1\n glViewport(0, 0, Width, Height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(Width) / float(Height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\n def DrawHUD(self, basicT=(0, 0, 0)):\n glMatrixMode(GL_MODELVIEW)\n pyglet.gl.glColor4f(0.0, 1, 0, 1.0)\n glEnable(GL_LINE_SMOOTH)\n glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)\n glLineWidth(3)\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES, ('v2i', (10, 15, 300, 305))\n )\n glLoadIdentity()\n glTranslatef(1.0, 1.0, -6.0)\n glRotatef(self.rquad, 0.0, 1.0, 
0.0)\n glColor3f(1.0, 1.0, 1.0)\n glPointSize(3.0)\n glBegin(GL_QUADS)\n glVertex3f(-1.0, 1.0, 0.0)\n glVertex3f(1.0, 1.0, 0.0)\n glVertex3f(1.0, -1.0, 0.0)\n glVertex3f(-1.0, -1.0, 0.0)\n glEnd()\n\n def DrawGLScene(self):\n global rtri, rquad\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n basicT = 1, 1, 1\n self.DrawHUD(basicT)\n glLoadIdentity()\n glTranslatef(15.0, -5, -50.0)\n glRotatef(20 * sin(self.rquad / 20.0), 0.1, 0.1, -1.0)\n glCallList(self.obj.gl_list)\n self.rquad = self.rquad + 1.3\n\n def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.dispatch_event('on_close')\n\n\ndefault_size = 1024, 768\nscreen_size1 = 640, 480\nif __name__ == '__main__':\n window = World()\n window.set_location(10, 30)\n window.set_size(*screen_size1)\n pyglet.app.run()\n", "step-5": "#!/usr/bin/env python\n#lesson4.py\n\n# See original source and C based tutorial at http://nehe.gamedev.net\n#This code was created by Richard Campbell '99\n\n#(ported to Python/PyOpenGL by John Ferguson 2000)\n#John Ferguson at [email protected]\n\n#Code ported for use with pyglet by Jess Hill (Jestermon) 2009\n#jestermon.weebly.com\n#[email protected]\n\n#because these lessons sometimes need openGL GLUT, you need to install\n#pyonlgl as well as pyglet, in order for this sample them to work\n#pyopengl ~ http://pyopengl.sourceforge.net\n#pyglet ~ http://www.pyglet.org\n\nimport pyglet\nfrom pyglet.gl import *\nfrom pyglet.window import key\nfrom OpenGL.GLUT import * #<<<==Needed for GLUT calls\nfrom objloader import *\nfrom numpy import sin\n\n##################################World\nclass World(pyglet.window.Window):\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n objfile1 = 'resource/predator.obj' \n objfile2 = 'resource/A10.obj' \n # objfile = 'resource/complex2.obj' \n obj = OBJ(objfile1)\n # obj2 = OBJ(objfile2)\n def __init__(self):\n config = Config(sample_buffers=1, samples=4,\n depth_size=16, double_buffer=True,)\n try:\n super(World, 
self).__init__(resizable=True, config=config)\n except:\n super(World, self).__init__(resizable=True)\n self.setup()\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def setup(self):\n self.width = 640\n self.height = 480\n self.rtri = 0.0 # (was global)\n self.rquad = 0.0 # (was global)\n self.InitGL(self.width, self.height)\n pyglet.clock.schedule_interval(self.update, 1/60.0) # update at 60Hz\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def update(self,dt):\n self.DrawGLScene()\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def on_draw(self):\n self.DrawGLScene()\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def on_resize(self,w,h):\n self.ReSizeGLScene(w,h)\n\n\n def MakeTransparent(self):\n glDisable(GL_DEPTH_TEST)\n glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) \n glEnable (GL_BLEND) \n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # A general OpenGL initialization function. Sets all of the initial parameters.\n def InitGL(self,Width, Height): # We call this right after our OpenGL window is created.\n glClearColor(0.0, 0.0, 0.0, 0.0) # This Will Clear The background Color To Black\n # glClearColor(0.0, 0.0, 0.5, 1.0) # This Will Clear The background Color To Black\n glClearDepth(1.0) # Enables Clearing Of The Depth Buffer\n glDepthFunc(GL_LESS) # The Type Of Depth Test To Do\n glEnable(GL_DEPTH_TEST) # Enables Depth Testing\n glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity() # Reset The Projection Matrix\n # Calculate The Aspect Ratio Of The Window\n #(pyglet initializes the screen so we ignore this call)\n #gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\n # for realisitic light diffusion effect\n specLight0 = [0.5, 0.5, 0.5, 1.0];\n glLightfv(GL_LIGHT0, GL_SPECULAR, specLight0);\n glMaterialfv(GL_FRONT, GL_SHININESS, 10.0);\n glLightfv(GL_LIGHT0, GL_POSITION, (0, 200, 100, 0.0))\n\n dens = 0.3 \n glLightfv(GL_LIGHT0, GL_AMBIENT, 
(dens,dens,dens, 0.0))\n # glLightfv(GL_LIGHT0, GL_DIFFUSE, (0.5, 0.5, 0.5, 0.0))\n\n glEnable(GL_LIGHT0)\n glEnable(GL_LIGHTING)\n glEnable(GL_COLOR_MATERIAL)\n # # glutFullScreenToggle()\n\n # self.MakeTransparent()\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # The function called when our window is resized (which shouldn't happen if you enable fullscreen, below)\n def ReSizeGLScene(self,Width, Height):\n if Height == 0: # Prevent A Divide By Zero If The Window Is Too Small\n Height = 1\n glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(Width)/float(Height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\n def DrawHUD(self,basicT=(0,0,0)):\n # glMatrixMode(GL_PROJECTION)\n # glLoadIdentity()\n # glOrtho ( 0, 640, 480, 0, 0, 1 )\n\n glMatrixMode(GL_MODELVIEW)\n # glTranslatef(0, 0, -30.0)\n pyglet.gl.glColor4f(0.0,1,0,1.0) \n glEnable (GL_LINE_SMOOTH); \n glHint (GL_LINE_SMOOTH_HINT, GL_DONT_CARE) \n glLineWidth (3) \n pyglet.graphics.draw ( 2, pyglet.gl.GL_LINES, ('v2i',(10, 15, 300, 305)) )\n\n # glClear(GL_COLOR_BUFFER_BIT)\n glLoadIdentity()\n glTranslatef(1.0, 1.0, -6.0)\n\n # Draw a square (quadrilateral) rotated on the X axis.\n glRotatef(self.rquad, 0.0, 1.0, 0.0) # Rotate\n glColor3f(1.0, 1.0, 1.0) # Bluish shade\n glPointSize(3.0)\n\n\n\n glBegin(GL_QUADS) # Start drawing a 4 sided polygon\n glVertex3f(-1.0, 1.0, 0.0) # Top Left\n glVertex3f(1.0, 1.0, 0.0) # Top Right\n glVertex3f(1.0, -1.0, 0.0) # Bottom Right\n glVertex3f(-1.0, -1.0, 0.0) # Bottom Left\n glEnd() # We are done with the polygon\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # The main drawing function.\n def DrawGLScene(self):\n global rtri, rquad\n\n # Clear The Screen And The Depth Buffer\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n basicT = (1,1,1)\n\n self.DrawHUD(basicT)\n\n glLoadIdentity() # Reset The View\n glTranslatef(15.0, -5, -50.0)\n # 
glTranslatef(15.0, 2*sin(self.rquad/50.)-5, -50.0)\n glRotatef(20*sin(self.rquad/20.), 0.1, 0.1, -1.0) # Rotate\n glCallList(self.obj.gl_list)\n\n# ---------------------------------------------------------------------------------\n # We are \"undoing\" the rotation so that we may rotate the quad on its own axis.\n # We also \"undo\" the prior translate. \n # This could also have been done using the matrix stack.\n\n # # # glLoadIdentity()\n # # # glTranslatef(-15.0, 0.0, -50.0)\n # # # glRotatef(self.rquad, 0.1, -1.0, 0.0) # Rotate\n # # # glCallList(self.obj2.gl_list)\n\n # glLoadIdentity()\n # # Move Right 1.5 units and into the screen 6.0 units.\n # glTranslatef(1.0, 1.0, -6.0)\n# \n # # Draw a square (quadrilateral) rotated on the X axis.\n # glRotatef(self.rquad, 0.0, 1.0, 0.0) # Rotate\n # glColor3f(0.3, 0.5, 1.0) # Bluish shade\n # glBegin(GL_QUADS) # Start drawing a 4 sided polygon\n # glVertex3f(-1.0, 1.0, 0.0) # Top Left\n # glVertex3f(1.0, 1.0, 0.0) # Top Right\n # glVertex3f(1.0, -1.0, 0.0) # Bottom Right\n # glVertex3f(-1.0, -1.0, 0.0) # Bottom Left\n # glEnd() # We are done with the polygon\n\n # What values to use? Well, if you have a FAST machine and a FAST 3D Card, then\n # large values make an unpleasant display with flickering and tearing. I found that\n # smaller values work better, but this was based on my experience.\n #(2009.. 9 years after this code was written, this still applies.. 
unless you use)\n #(a timed display, as done here with pyglet.clock.schedule_interval(self.update, 1/60.0) #updates at 60Hz)\n # self.rtri = self.rtri + 1.0 # Increase The Rotation Variable For The Triangle\n self.rquad = self.rquad + 1.3 # Decrease The Rotation Variable For The Quad\n\n\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n def on_key_press(self, symbol, modifiers):\n if symbol == key.ESCAPE:\n self.dispatch_event('on_close')\n # since this is double buffered, swap the buffers to display what just got drawn.\n #(pyglet provides the swap, so we dont use the swap here)\n #glutSwapBuffers()\n\ndefault_size = 1024,768 \nscreen_size1 = 640,480 \nif __name__ == \"__main__\":\n window = World()\n window.set_location(10,30)\n window.set_size(*screen_size1)\n # window.set_fullscreen(True)\n pyglet.app.run()\n\n\n\n\n\n", "step-ids": [ 9, 13, 15, 16, 17 ] }
[ 9, 13, 15, 16, 17 ]
# MEDIUM -- LeetCode 29: divide two integers without *, /, or %.
# Repeated subtraction alone TLEs; instead the divisor is doubled via bit
# shifts so each pass peels off the largest multiple that still fits.
# Time O(log^2 N), space O(1).


class Solution:
    """Integer division with the quotient truncated toward zero and the
    result clamped to the signed 32-bit range."""

    def divide(self, dividend: int, divisor: int) -> int:
        """Return dividend / divisor, truncating toward zero.

        Works on magnitudes: repeatedly find the largest ``divisor << k``
        that still fits into what remains, add ``1 << k`` to the quotient,
        subtract, and repeat until the remainder is below the divisor.
        """
        # The only 32-bit overflow case: -2**31 / -1 would be 2**31.
        if dividend == -2 ** 31 and divisor == -1:
            return 2 ** 31 - 1
        if dividend == 0:
            return 0

        # The quotient is negative exactly when the operands' signs differ
        # (dividend == 0 was already handled above).
        negative = (dividend < 0) != (divisor < 0)

        remaining = abs(dividend)
        magnitude = abs(divisor)
        quotient = 0

        while remaining >= magnitude:
            # Find the largest k with magnitude << k <= remaining ...
            shift = 0
            while remaining >= magnitude << shift:
                shift += 1
            shift -= 1  # the inner loop overshoots by one
            # ... then peel off that whole multiple in a single step.
            quotient += 1 << shift
            remaining -= magnitude << shift

        return -quotient if negative else quotient
normal
{ "blob_id": "d1864f454b1909196fd9a6e2279b23f4c4148917", "index": 7232, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n", "step-3": "class Solution:\n\n def divide(self, dividend: int, divisor: int) ->int:\n if dividend == -2 ** 31 and divisor == -1:\n return 2 ** 31 - 1\n if dividend == 0:\n return 0\n sign = dividend >= 0 and divisor >= 0 or dividend < 0 and divisor < 0\n left, right = abs(dividend), abs(divisor)\n result = 0\n while left >= right:\n count = 0\n while left >= right << count:\n count += 1\n result += 1 << count - 1\n left -= right << count - 1\n return result if sign else -result\n", "step-4": "# MEDIUM\n# TLE if decrement divisor only \n\n# Bit manipulation.\n# input: 100 / 3 \n\n# times = 0\n# 3 << 0 = 3\n# 3 << 1 = 6\n# 3 << 2 = 12\n# 3 << 3 = 24\n# 3 << 4 = 48\n# 3 << 5 = 96\n# 3 << 6 = 192 => greater than dividend 100 => stop here \n# times -=1 because 3 << 6 is too big \n# result += 1 << times => divided by 32 \n# set dividend to dividend -= divisor << times \n\n# times O(log N) Space O(1)\n\n\nclass Solution:\n def divide(self, dividend: int, divisor: int) -> int:\n if dividend == -2**31 and divisor == -1:\n return 2**31-1\n if dividend == 0:\n return 0\n sign = dividend>=0 and divisor>=0 or (dividend<0 and divisor<0)\n left,right = abs(dividend),abs(divisor)\n result = 0\n while left>= right:\n count = 0\n while left >= right<< count:\n \n count += 1\n \n #print('count',count)\n # count -1 because right * count > left\n result += 1 << (count-1)\n #print(\"result\",result)\n left -= right << (count-1)\n #print(\"dividend\",left)\n \n return result if sign else -result \n \n ", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*-
# Initial migration for the "preaches" app: creates the Author, Preach,
# Social_media and Tags tables, then adds the two many-to-many links
# (Preach.tags and Author.social_media).
from __future__ import unicode_literals

from django.db import migrations, models
import datetime


class Migration(migrations.Migration):

    # First migration of the app, so nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('email', models.EmailField(max_length=100)),
                ('telephone', models.CharField(max_length=12)),
                ('cellphone', models.CharField(max_length=12)),
                ('img', models.ImageField(upload_to='')),
                # Role codes: 0=Pastor titular, 1=Pastor/a, 2=Diacono/a, 3=Editor/a.
                # NOTE(review): default=5 lies outside the declared choices -- confirm intended.
                ('role', models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1, 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')])),
            ],
        ),
        migrations.CreateModel(
            name='Preach',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('title', models.CharField(max_length=60)),
                ('summary', models.CharField(blank=True, max_length=500)),
                ('date', models.DateField()),
                # NOTE(review): this default is frozen at the moment makemigrations
                # ran (2017-05-07), not evaluated at save time -- typical
                # auto-generated artifact; confirm it is intended.
                ('published_date', models.DateField(default=datetime.datetime(2017, 5, 7, 2, 3, 52, 71419))),
                ('url', models.URLField()),
                ('img', models.ImageField(verbose_name='Imagen', upload_to='images')),
                ('author', models.ForeignKey(to='preaches.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Social_media',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.IntegerField(default=0, verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (1, 'Instagram'), (2, 'Twitter')])),
                ('url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(verbose_name='Categoria', max_length=80)),
            ],
        ),
        # Many-to-many fields are added after all target models exist.
        migrations.AddField(
            model_name='preach',
            name='tags',
            field=models.ManyToManyField(to='preaches.Tags'),
        ),
        migrations.AddField(
            model_name='author',
            name='social_media',
            field=models.ManyToManyField(to='preaches.Social_media'),
        ),
    ]
normal
{ "blob_id": "4a118f9081a8b3baf0b074c8dc14eaeef4559c08", "index": 6684, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Author', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('name', models.CharField(max_length=60)\n ), ('email', models.EmailField(max_length=100)), ('telephone',\n models.CharField(max_length=12)), ('cellphone', models.CharField(\n max_length=12)), ('img', models.ImageField(upload_to='')), ('role',\n models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1,\n 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')]))]), migrations.\n CreateModel(name='Preach', fields=[('id', models.AutoField(\n auto_created=True, serialize=False, primary_key=True, verbose_name=\n 'ID')), ('title', models.CharField(max_length=60)), ('summary',\n models.CharField(blank=True, max_length=500)), ('date', models.\n DateField()), ('published_date', models.DateField(default=datetime.\n datetime(2017, 5, 7, 2, 3, 52, 71419))), ('url', models.URLField()),\n ('img', models.ImageField(verbose_name='Imagen', upload_to='images'\n )), ('author', models.ForeignKey(to='preaches.Author'))]),\n migrations.CreateModel(name='Social_media', fields=[('id', models.\n AutoField(auto_created=True, serialize=False, primary_key=True,\n verbose_name='ID')), ('name', models.IntegerField(default=0,\n verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (\n 1, 'Instagram'), (2, 'Twitter')])), ('url', models.URLField())]),\n migrations.CreateModel(name='Tags', fields=[('id', models.AutoField\n (auto_created=True, serialize=False, primary_key=True, verbose_name\n ='ID')), ('name', models.CharField(verbose_name='Categoria',\n max_length=80))]), migrations.AddField(model_name='preach', name=\n 'tags', 
field=models.ManyToManyField(to='preaches.Tags')),\n migrations.AddField(model_name='author', name='social_media', field\n =models.ManyToManyField(to='preaches.Social_media'))]\n", "step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Author', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('name', models.CharField(max_length=60)\n ), ('email', models.EmailField(max_length=100)), ('telephone',\n models.CharField(max_length=12)), ('cellphone', models.CharField(\n max_length=12)), ('img', models.ImageField(upload_to='')), ('role',\n models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1,\n 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')]))]), migrations.\n CreateModel(name='Preach', fields=[('id', models.AutoField(\n auto_created=True, serialize=False, primary_key=True, verbose_name=\n 'ID')), ('title', models.CharField(max_length=60)), ('summary',\n models.CharField(blank=True, max_length=500)), ('date', models.\n DateField()), ('published_date', models.DateField(default=datetime.\n datetime(2017, 5, 7, 2, 3, 52, 71419))), ('url', models.URLField()),\n ('img', models.ImageField(verbose_name='Imagen', upload_to='images'\n )), ('author', models.ForeignKey(to='preaches.Author'))]),\n migrations.CreateModel(name='Social_media', fields=[('id', models.\n AutoField(auto_created=True, serialize=False, primary_key=True,\n verbose_name='ID')), ('name', models.IntegerField(default=0,\n verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (\n 1, 'Instagram'), (2, 'Twitter')])), ('url', models.URLField())]),\n migrations.CreateModel(name='Tags', fields=[('id', models.AutoField\n (auto_created=True, serialize=False, primary_key=True, verbose_name\n ='ID')), ('name', models.CharField(verbose_name='Categoria',\n max_length=80))]), 
migrations.AddField(model_name='preach', name=\n 'tags', field=models.ManyToManyField(to='preaches.Tags')),\n migrations.AddField(model_name='author', name='social_media', field\n =models.ManyToManyField(to='preaches.Social_media'))]\n", "step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Author',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(max_length=60)),\n ('email', models.EmailField(max_length=100)),\n ('telephone', models.CharField(max_length=12)),\n ('cellphone', models.CharField(max_length=12)),\n ('img', models.ImageField(upload_to='')),\n ('role', models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1, 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')])),\n ],\n ),\n migrations.CreateModel(\n name='Preach',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=60)),\n ('summary', models.CharField(blank=True, max_length=500)),\n ('date', models.DateField()),\n ('published_date', models.DateField(default=datetime.datetime(2017, 5, 7, 2, 3, 52, 71419))),\n ('url', models.URLField()),\n ('img', models.ImageField(verbose_name='Imagen', upload_to='images')),\n ('author', models.ForeignKey(to='preaches.Author')),\n ],\n ),\n migrations.CreateModel(\n name='Social_media',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.IntegerField(default=0, verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (1, 'Instagram'), (2, 'Twitter')])),\n ('url', models.URLField()),\n ],\n ),\n migrations.CreateModel(\n name='Tags',\n fields=[\n ('id', models.AutoField(auto_created=True, 
serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(verbose_name='Categoria', max_length=80)),\n ],\n ),\n migrations.AddField(\n model_name='preach',\n name='tags',\n field=models.ManyToManyField(to='preaches.Tags'),\n ),\n migrations.AddField(\n model_name='author',\n name='social_media',\n field=models.ManyToManyField(to='preaches.Social_media'),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
def parse(num): strnum = str(num) words = [] for item in range(len(strnum)-1, -1, -1): words.append(strnum[item]) hundred = words[:3] thousand = words[3:6] million = words[6:len(words)] hundred = hundred[::-1] thousand = thousand[::-1] million = million[::-1] units = ['zero','one','two','three','four','five','six','seven','eight','nine'] tens = ['ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen'] tens_more = ['zero','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety'] reads = [] if len(million)>0: if len(million)==3: num = int(million[0]) reads.append(units[num]) reads.append('hundred') reads.append('and') num = int(million[1]) if num>1: reads.append(tens_more[num]) if num!=0: num = int(million[2]) reads.append(units[num]) else: num = int(million[1]) reads.append(tens[num]) if len(million)==2: num = int(million[0]) if num>1: reads.append(tens_more[num]) num = int(million[1]) if num!=0: reads.append(units[num]) else: num = int(million[1]) reads.append(tens[num]) if len(million)==1: num = int(million[0]) reads.append(units[num]) reads.append('million') reads.append('and') if __name__ == "__main__": parse(23456789)
normal
{ "blob_id": "843901b65a556e57470f73be2657e9fd3c0facc6", "index": 9721, "step-1": "<mask token>\n", "step-2": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum) - 1, -1, -1):\n words.append(strnum[item])\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',\n 'eight', 'nine']\n tens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens_more = ['zero', 'ten', 'twenty', 'thirty', 'forty', 'fifty',\n 'sixty', 'seventy', 'eighty', 'ninety']\n reads = []\n if len(million) > 0:\n if len(million) == 3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n num = int(million[1])\n if num > 1:\n reads.append(tens_more[num])\n if num != 0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 2:\n num = int(million[0])\n if num > 1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num != 0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 1:\n num = int(million[0])\n reads.append(units[num])\n reads.append('million')\n reads.append('and')\n\n\n<mask token>\n", "step-3": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum) - 1, -1, -1):\n words.append(strnum[item])\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n units = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',\n 'eight', 'nine']\n tens = ['ten', 'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen',\n 'sixteen', 'seventeen', 'eighteen', 'nineteen']\n tens_more = ['zero', 'ten', 'twenty', 'thirty', 'forty', 
'fifty',\n 'sixty', 'seventy', 'eighty', 'ninety']\n reads = []\n if len(million) > 0:\n if len(million) == 3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n num = int(million[1])\n if num > 1:\n reads.append(tens_more[num])\n if num != 0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 2:\n num = int(million[0])\n if num > 1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num != 0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n if len(million) == 1:\n num = int(million[0])\n reads.append(units[num])\n reads.append('million')\n reads.append('and')\n\n\nif __name__ == '__main__':\n parse(23456789)\n", "step-4": "def parse(num):\n strnum = str(num)\n words = []\n for item in range(len(strnum)-1, -1, -1):\n words.append(strnum[item])\n\n hundred = words[:3]\n thousand = words[3:6]\n million = words[6:len(words)]\n\n hundred = hundred[::-1]\n thousand = thousand[::-1]\n million = million[::-1]\n\n units = ['zero','one','two','three','four','five','six','seven','eight','nine']\n tens = ['ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen']\n tens_more = ['zero','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety']\n\n reads = []\n if len(million)>0:\n if len(million)==3:\n num = int(million[0])\n reads.append(units[num])\n reads.append('hundred')\n reads.append('and')\n\n num = int(million[1])\n if num>1:\n reads.append(tens_more[num])\n if num!=0:\n num = int(million[2])\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n\n if len(million)==2:\n num = int(million[0])\n if num>1:\n reads.append(tens_more[num])\n num = int(million[1])\n if num!=0:\n reads.append(units[num])\n else:\n num = int(million[1])\n reads.append(tens[num])\n \n if len(million)==1:\n num = int(million[0])\n 
reads.append(units[num])\n\n reads.append('million')\n reads.append('and')\n\nif __name__ == \"__main__\":\n parse(23456789)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/python # # # This is the Hydra slave module
normal
{ "blob_id": "95cdf6a22655d500c2838899ec9dfbff637a5969", "index": 2097, "step-1": "#!/usr/bin/python\n#\n#\n\n# This is the Hydra slave module\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 1 ] }
[ 1 ]
#Arushi Patel (aruship) from tkinter import * import random ###################################### #images taken from wikipedia,pixabay, #trans americas, clipartpanda,pngimg, #findicons, microsoft word ###################################### #################################### # init #################################### def init(data): data.score =0 data.mode = "splashScreen" data.timerDelay = 100 data.height = 800 data.width = 800 data.speed = 10 data.speedAI = 12 data.speedAI2 = 12 data.switchOnProgress = False data.r = 25 data.cx= 280 data.cy=750 data.onLeft1, data.onLeft2 = True, True data.win= False data.coconuts = [] data.powerUps = [] data.coconuts1 = [] data.coconuts2 = [] data.coconutsAI1 =[] data.coconutsAI2 = [] data.invincible = [] data.pauseDrops = False data.pause1Drop = False data.pause2Drop = False init1(data) def init1(data): data.beInvincible = False data.Invincible1 = False data.Invincible2 = False data.scaryBug = [] data.time = 0 data.coconutFall = False data.sides = ["r", "l"] data.level = 1 data.splashScreenTime = 0 data.splashScreenDrops = [] data.background= PhotoImage(file="tree.gif") data.deadScreen = PhotoImage(file = "deadBug.gif") data.ladyBug = PhotoImage(file = "lady.gif") data.winScreen= PhotoImage(file = "treeTop1.gif") data.winBug = PhotoImage(file = "littleBug.gif") data.halfBackground = PhotoImage(file = "halfTree.gif") data.umbrella = PhotoImage(file = "umbrella2.gif") data.spider = PhotoImage(file = "spider.gif") data.hourGlass = PhotoImage(file = "hourGlass.gif") data.splashScreen = PhotoImage(file = "splash.gif") init2(data) def init2(data): data.tbg= PhotoImage(file = "tbg2.gif") data.click = PhotoImage(file = "click.gif") data.notClick = PhotoImage(file = "notClick.gif") data.player1X = 150 data.player1Y = 750 data.player2X = 550 data.player2Y = 750 data.winner = None data.speed = 12 data.speed2 = 12 data.editorTime = 0 data.editorDrops = [] data.margin = 100 data.enter = False data.powerUpsEditor = None data.yourSpeed = 
None data.rainSpeed = None data.slow= data.notClick data.medium = data.notClick data.fast = data.notClick data.drizzle = data.notClick data.rain =data.notClick data.thunderstorm = data.notClick init3(data) def init3(data): data.yes = data.notClick data.no = data.notClick data.enter = data.notClick data.levelEditorLives =2 data.rSpeed = None data.start = None data.start1 = None data.start2 = None data.difficulty = None data.mode1 = data.notClick data.mode2 = data.notClick data.mode3 = data.notClick data.mode4 = data.notClick data.mode5 = data.notClick data.mode6 = data.notClick data.home = PhotoImage(file = "home.gif") data.helpScreen = PhotoImage(file = "help1.gif") data.title = PhotoImage(file = "title.gif") data.scoreList = [] data.spotList = [270,364,458,552, 646, 740] data.savedScores = readFile("score.txt") if data.mode == "levelCreated": setEverything(data) initsplashScreenNumbers(data) def initsplashScreenNumbers(data): data.splashButtonY = 425 data.p1ButtonX= 225 data.p2ButtonX = 290 data.edButton = 355 data.diffButton = 425 data.helpButton = 490 data.sboardButton = 555 data.hitPenalty = 75 data.splashText = data.height/2-20 data.lives = 2 data.levelMax = 8 data.lane = 94 data.Player1Min= 270 data.Player1Max = 740 data.homeX =50 data.homeY = 650 initScoreBoardHelp(data) init1Player(data) def initScoreBoardHelp(data): data.tbgY=5*data.height/12 data.txtTScore = 150 data.S_P = 220 data.numScores = 5 data.scorePos = data.height/10 data.scoreShift = 270 data.helpY = data.height/2-20 data.name = "" data.printName = "" data.hit = False initAI(data) def init1Player(data): data.buffer = 40 def initAI(data): data.AITY = 225 data.easyX = 200 data.easyY = 300 data.medX =400 data.hardX = 600 data.enterY = 450 data.difS = 4 data.difM = 6 data.difH = 8 data.last = 500 data.enterX = 575 data.PUT = 450 data.RST = 350 data.YST = 250 #################################### # mode dispatcher #################################### def mousePressed(event, data): if (data.mode == 
"splashScreen"): splashScreenMousePressed(event, data) elif (data.mode == "1Player"): playerMousePressed(event, data) elif (data.mode == "2Player"): twoPlayerMousePressed(event, data) elif (data.mode == "editor"): editorMousePressed(event,data) elif (data.mode == "levelCreated"): levelCreatedMousePressed(event,data) elif (data.mode == "AI"): AIMousePressed(event, data) elif (data.mode == "difficulty"): difficultyMousePressed(event, data) elif (data.mode == "scoreboard"): scoreboardMousePressed(event, data) elif (data.mode == "help"): helpMousePressed(event, data) def keyPressed(event, data): if (data.mode == "splashScreen"): splashKeyPressed(event, data) elif (data.mode == "1Player"):playerKeyPressed(event, data) elif (data.mode == "2Player"):twoPlayerKeyPressed(event, data) elif (data.mode == "editor"): editorKeyPressed(event, data) elif (data.mode == "levelCreated"): levelCreatedKeyPressed(event,data) elif (data.mode == "AI"): AIKeyPressed(event, data) elif (data.mode == "difficulty"): difficultyKeyPressed(event, data) elif (data.mode == "scoreboard"): scoreboardKeyPressed(event, data) elif (data.mode == "help"): helpKeyPressed(event, data) def timerFired(data): if (data.mode == "splashScreen"): splashScreenTimerFired(data) elif (data.mode == "1Player"):playerTimerFired(data) elif (data.mode == "2Player"):twoPlayerTimerFired(data) elif (data.mode == "editor"): editorTimerFired(data) elif (data.mode == "levelCreated"): levelCreatedTimerFired(data) elif (data.mode == "AI"): AITimerFired(data) elif (data.mode == "difficulty"): difficultyTimerFired(data) elif (data.mode == "scoreboard"): scoreboardTimerFired(data) elif (data.mode == "help"): helpTimerFired(data) def redrawAll(canvas, data): if (data.mode == "splashScreen"): splashScreenRedrawAll(canvas, data) elif (data.mode == "1Player"):playerRedrawAll(canvas, data) elif (data.mode == "2Player"):twoPlayerRedrawAll(canvas, data) elif (data.mode == "editor"): editorRedrawAll(canvas, data) elif (data.mode == 
"levelCreated"): levelCreatedRedrawAll(canvas,data) elif (data.mode == "AI"): AIRedrawAll(canvas, data) elif (data.mode == "difficulty"): difficultyRedrawAll(canvas, data) elif (data.mode == "scoreboard"): scoreboardRedrawAll(canvas, data) elif (data.mode == "help"): helpRedrawAll(canvas, data) #################################### # splashScreen mode #################################### def splashScreenMousePressed(event, data): #checks for selection of mode if data.splashButtonY-2*data.r <= event.x <=data.splashButtonY+2*data.r: if data.p1ButtonX-data.r<=event.y<=data.p1ButtonX+data.r: data.mode = "1Player" if data.p2ButtonX-data.r<=event.y<=data.p2ButtonX+data.r: data.mode = "2Player" if data.edButton-data.r<=event.y<=data.edButton+data.r: data.mode = "editor" if data.diffButton-data.r<=event.y<=data.diffButton+data.r: data.mode = "difficulty" if data.helpButton-data.r<=event.y<=data.helpButton+data.r: data.mode = "help" if data.sboardButton-data.r<=event.y<=data.sboardButton+data.r: data.mode = "scoreboard" def splashKeyPressed(event, data): pass def splashScreenTimerFired(data): data.splashScreenTime += 1 if data.splashScreenTime %2 ==1: rainDropSplash(data) for drop in data.splashScreenDrops: drop.onTimerFired(data) def splashScreenButtons(canvas, data): canvas.create_image(data.splashButtonY,data.p1ButtonX,image = data.mode1) canvas.create_image(data.splashButtonY,data.p2ButtonX,image = data.mode2) canvas.create_image(data.splashButtonY,data.edButton,image = data.mode3) canvas.create_image(data.splashButtonY,data.diffButton,image = data.mode4) canvas.create_image(data.splashButtonY,data.helpButton,image = data.mode5) canvas.create_image(data.splashButtonY,data.sboardButton,image =data.mode6) def rainDropSplash(data): xPosition = random.randint(0,800) data.splashScreenDrops.append(Coconuts(xPosition,0)) def splashScreenRedrawAll(canvas, data): canvas.create_image(data.width/2, data.splashText-10, image=data.title) for drop in data.splashScreenDrops: 
drop.draw(canvas) canvas.create_text(data.width/2, data.splashText, text=""" 1.) Single Player Level Mode 2.) Two-Player Mode 3.) Level Creator Practice Mode 4.) Play Against the Computer 5.) Help and Instructions 6.) Scoreboard """, font="Arial 14 bold", fill = "yellow") splashScreenButtons(canvas, data) #################################### # taken from class notes #################################### def writeFile(path, contents): with open(path, "wt") as f: f.write(contents) def readFile(path): with open(path, "rt") as f: return f.read() #################################### # 1Player mode #################################### #Coconuts (from Mario game) represent the water drops class Coconuts(object): def __init__(self,x,y): self.x = x self.y = y self.r = 9 self.fill = "deep sky blue" self.speed = 30 self.outline= "blue" def draw(self, canvas): canvas.create_polygon(self.x,self.y- 2*self.r, self.x-self.r, self.y, self.x, self.y + self.r, self.x+self.r, self.y, fill = self.fill, outline = self.outline, width = 3) def onTimerFired(self, data): # downward falling motion self.y += self.speed def hit(data): #checks for hitting rain for coconut in data.coconuts: if data.mode == "1Player" or data.mode == "levelCreated": if coconut.y>=data.cy-data.r and coconut.y<=data.cy+data.r: if coconut.x>=data.cx-data.r and coconut.x<=data.cx+data.r: data.cy+=data.hitPenalty if data.mode == "levelCreated": data.lives-=1 elif data.hit ==False and data.level<data.levelMax: data.score -=data.level data.coconuts.remove(coconut) if data.mode == "levelCreated": data.levelEditorLives-=1 def hit2Player(data): if data.mode == "2Player": if data.Invincible1 == False: #only when powerup isn't active for coconut in data.coconuts1: if coconut.y>=data.player1Y-data.r \ and coconut.y<=data.player1Y+data.r: if coconut.x>=data.player1X-data.r and \ coconut.x<=data.player1X+data.r: data.player1Y+=data.hitPenalty data.coconuts1.remove(coconut) if data.Invincible2 == False: #only when powerup isn't 
active for coconut in data.coconuts2: if coconut.y>=data.player2Y-data.r and \ coconut.y<=data.player2Y+data.r: if coconut.x>=data.player2X-data.r and \ coconut.x<=data.player2X+data.r: data.player2Y+=data.hitPenalty data.coconuts2.remove(coconut) class PowerUps(Coconuts): def __init__(self,x,y): super().__init__(x, y) def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.hourGlass) def hitPause(data): # checks if hits hour-glass & pauses with flag for powerUp in data.powerUps: if data.mode == "1Player" or data.mode == "levelCreated": if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r: if powerUp.x>=data.cx-data.r and powerUp.x<=data.cx+data.r: data.pauseDrops = True data.start = data.cy data.powerUps.remove(powerUp) elif data.mode == "2Player" or data.mode == "AI": if powerUp.y>=data.player1Y-data.r and \ powerUp.y<=data.player1Y+data.r: if powerUp.x>=data.player1X-data.r and \ powerUp.x<=data.player1X+data.r: data.pause1Drop = True data.start1 = data.player1Y data.powerUps.remove(powerUp) if powerUp.y>=data.player2Y-data.r and \ powerUp.y<=data.player2Y+data.r: if powerUp.x>=data.player2X-data.r and \ powerUp.x<=data.player2X+data.r: data.pause2Drop = True data.start2 = data.player2Y data.powerUps.remove(powerUp) class Invincible(PowerUps): def __init__(self,x,y): super().__init__(x, y) def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.umbrella) def hitInvincible(data): #checks if hits umbrella powerup for powerUp in data.invincible: if data.mode == "1Player" or data.mode == "levelCreated": if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r: if powerUp.x>=data.cx-data.r and powerUp.x<=data.cx+data.r: data.beInvincible = True data.start = data.cy data.invincible.remove(powerUp) if data.mode == "2Player" or data.mode == "AI": #for player1 if powerUp.y>=data.player1Y-data.r and \ powerUp.y<=data.player1Y+data.r: if powerUp.x>=data.player1X-data.r and \ powerUp.x<=data.player1X+data.r: 
data.Invincible1=True data.start1 = data.player1Y data.invincible.remove(powerUp) # for player 2 if powerUp.y>=data.player2Y-data.r and \ powerUp.y<=data.player2Y+data.r: if powerUp.x>=data.player2X-data.r and \ powerUp.x<=data.player2X+data.r: data.Invincible2=True data.start2 = data.player2Y data.invincible.remove(powerUp) class ScaryBug(object): def __init__(self,x,y): self.x = x self.y = y self.speed = 25 def draw(self, canvas, data): canvas.create_image(self.x, self.y, image=data.spider) def onTimerFired(self, data): if data.mode =="2Player" or data.mode == "AI": self.speed = 35 self.y -= self.speed if data.mode == "1Player" or data.mode == "levelCreated" and\ data.time %8 ==0: #makes spider dynamically move side = random.choice(data.sides) if side == "l": if self.x -data.lane >=data.Player1Min:self.x-=data.lane else: self.x+=data.lane elif side == "r": if self.x+data.lane<= data.Player1Max:self.x +=data.lane else: self.x -=data.lane def hitScaryBug(data): # checks for automatic death by spider for bug in data.scaryBug: if data.mode == "1Player" or data.mode == "levelCreated": if bug.y>=data.cy-1.5*data.r and bug.y<=data.cy+1.5*data.r: if bug.x>=data.cx-1.5*data.r and bug.x<=data.cx+1.5*data.r: data.hit = True data.lives = 0 data.levelEditorLives = 0 if data.mode == "2Player" or data.mode == "AI": if bug.y>=data.player1Y-data.r and bug.y<=data.player1Y+data.r: if bug.x>=data.player1X-data.r and bug.x<=data.player1X+data.r: data.winner= "player2" if bug.y>=data.player2Y-data.r and bug.y<=data.player2Y+data.r: if bug.x>=data.player2X-data.r and bug.x<=data.player2X+data.r: data.winner= "player1" def drawPowerups(canvas, data): for bug in data.scaryBug: bug.draw(canvas, data) for powerUp in data.powerUps: powerUp.draw(canvas, data) for powerUp in data.invincible: powerUp.draw(canvas, data) def drawHome(canvas, data): #home button in every screen canvas.create_image(data.homeX,data.homeY, image= data.home) def checkHome(event, data): if data.homeY-data.r<= event.y 
<= data.homeY +data.r: if data.homeX-data.r<= event.x<=data.homeX+ data.r: init(data) def coconutShot(data): if data.level >0 and data.pauseDrops == False: if data.time%int(data.levelMax/data.level) == 0 or data.time%6==0: #increases drops as level increases xPosition1 = random.randint(0,data.Player1Min-data.buffer) xPosition2 = random.randint(data.Player1Max+data.buffer, data.width +data.buffer) data.coconuts.append(Coconuts(xPosition1,0)) data.coconuts.append(Coconuts(xPosition2,0)) xPosition4 = random.randint(data.Player1Min-data.buffer, data.Player1Max+data.buffer) data.coconuts.append(Coconuts(xPosition4,0)) if data.time %5 ==0: xPosition3 = random.randint(0, data.Player1Min-data.buffer) data.coconuts.append(Coconuts(xPosition3,0)) if data.time % int(24/data.level) ==0: side = random.choice(data.sides) if side == "l": data.coconuts.append(Coconuts(data.Player1Min,0)) elif side =="r": data.coconuts.append(Coconuts(data.Player1Max,0)) powerUpCoconutShot(data) def powerUpCoconutShot(data): #adds powerUps #magic #s toallow for powerups to be added at different times if data.time % 60 == 0 and data.time%120 !=0: Position = random.choice(data.spotList) data.powerUps.append(PowerUps(Position,0)) if data.time%50 == 0: Position = random.choice(data.spotList) data.invincible.append(Invincible(Position,0)) if data.time %100==0: Position = random.choice(data.spotList) data.scaryBug.append(ScaryBug(Position,750)) def playerKeyPressed(event,data): if data.level<data.levelMax and event.keysym == "r": init(data) if (event.keysym == "Left") and data.cx>=data.Player1Min+(data.lane/2): data.cx -=(data.lane)/2 elif(event.keysym == "Right") and data.cx<=data.Player1Max: data.cx +=(data.lane)/2 if data.level >= data.levelMax: #enter name for scoreboard if len(event.keysym) ==1: if len(data.name) <15: data.name += event.keysym if event.keysym=="BackSpace": data.name = data.name[0:-1] if event.keysym == "Return": data.scoreList += ((data.score, data.name)) #saves file 
writeFile("score.txt", data.savedScores+str(data.score)+","+data.name+"\n") data.mode ="scoreboard" def playerMousePressed(event, data): checkHome(event, data) def playerTimerFired(data): #actually pauses, and moves drops/player if data.hit== False and data.level<data.levelMax: data.cy-=data.speed if data.time%5 ==0: data.score +=data.level if data.cy < 15: #basically made it to the top data.level +=1 data.cy = data.Player1Max + 10 data.speed +=2 if data.cy>40: #so drops you can't see don't hit you data.time +=1 if data.pauseDrops !=True: coconutShot(data) for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) for coconut in data.coconuts: # only want drops to move if not paused if data.pauseDrops == False: coconut.onTimerFired(data) if data.beInvincible == False:hit(data) if data.start != None: if abs(data.start-data.cy) >= 120: #to limit time for powerups to be active data.pauseDrops, data.beInvincible = False, False def playerRedrawAll(canvas, data): # magic #s mainly for screen placement canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_line(0,20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width/6,50, text ="Level: %d" %data.level, font = "Arial 18 bold", fill = "yellow") canvas.create_text(data.width/6,80, text ="Score: %d" %data.score, font = "Arial 18 bold", fill = "yellow") canvas.create_text(2*data.width/3,660, text ="""The greater the level, the more points get added to your score!""", font = "Arial 15 bold", fill = "yellow") if data.hit== True: canvas.create_rectangle(0,0,data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.deadScreen) canvas.create_text(data.width/2,data.height/4, 
text = "You Lose! Better Luck Next Time!", font = "Helvetica 23 bold", fill = "yellow") canvas.create_text(data.width/2,280, text ="Score: %d" %data.score, font = "Arial 13 bold", fill = "yellow") if data.level >= 8: madeIt(canvas, data) drawHome(canvas, data) def madeIt(canvas, data):# magic #s mainly for screen placement canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,70, text = "You Made it!", font = "Arial 23 bold", fill = "yellow") canvas.create_text(data.width/2,100, text ="Score: %d" %data.score, font = "Arial 15 bold", fill = "yellow") canvas.create_text(data.width/2,375, text ="Congrats! Enter your Name!", font = "Arial 15 bold", fill = "yellow") canvas.create_rectangle(data.width/2 - 50, 400, data.width/2+50, 450, fill = "white") canvas.create_text(data.width/2, 425, text = data.name) #################################### # 2Player mode #################################### def drop2Player(data): #adds drops when not paused #magic #s are position of where drops are starting if data.winner ==None and data.pauseDrops == False: if data.time%15==0: xPosition1 = random.randint(0,385) if abs(xPosition1 - 100)>25 and abs(xPosition1 - 360)>25: #so random drops don't interfere with the lane ones if data.pause1Drop != True: data.coconuts1.append(Coconuts(xPosition1,0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(xPosition1 +410,0)) if data.time % 12 ==0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: data.coconuts1.append(Coconuts(140,0)) if data.pause2Drop != True: data.coconuts2.append(Coconuts(540,0)) elif side =="r": if data.pause1Drop !=True:data.coconuts1.append(Coconuts(344,0)) if data.pause2Drop!=True:data.coconuts2.append(Coconuts(755,0)) powerupDrop2Player(data) def powerupDrop2Player(data): #adds powerups on both screens (in the same 
position) if data.time % 45 == 0 and data.time%90 !=0: #randomize placement side = random.choice(data.sides) if side == "l": if data.pause1Drop!=True:data.powerUps.append(PowerUps(140,0)) if data.pause2Drop!=True:data.powerUps.append(PowerUps(540,0)) elif side =="r": if data.pause1Drop!=True:data.powerUps.append(PowerUps(344,0)) if data.pause2Drop!=True:data.powerUps.append(PowerUps(755,0)) if data.time%60 == 0: side = random.choice(data.sides) if side == "l": if data.pause1Drop!=True:data.invincible.append(Invincible(140,0)) if data.pause2Drop!=True:data.invincible.append(Invincible(540,0)) elif side =="r": if data.pause1Drop!=True:data.invincible.append(Invincible(344,0)) if data.pause2Drop!=True:data.invincible.append(Invincible(755,0)) if data.time %90==0: side = random.choice(data.sides) if side == "l": data.scaryBug.append(ScaryBug(140,750)) data.scaryBug.append(ScaryBug(540,750)) elif side =="r": data.scaryBug.append(ScaryBug(344,750)) data.scaryBug.append(ScaryBug(755,750)) def twoPlayerKeyPressed(event,data): # controllers for both bugs if event.keysym == "r": init(data) if data.winner==None: if (event.keysym == "a") and data.onLeft1==False: data.onLeft1 = True data.player1X = 150 if(event.keysym == "d") and data.onLeft1== True: data.onLeft1 = False data.player1X = 330 if (event.keysym == "Left") and data.onLeft2==False: data.onLeft2 = True data.player2X = 550 if(event.keysym == "Right") and data.onLeft2 == True: data.onLeft2 = False data.player2X = 750 def twoPlayerMousePressed(event, data): checkHome(event, data) def twoPlayerTimerFired(data): if data.winner == None: data.player1Y-=data.speed #<15 signifies that lady bug reached the top if data.player1Y < 15 and data.player2Y >15: data.winner= "player1" if data.player1Y>40: data.time +=1 drop2Player(data) data.player2Y-=data.speed if data.player2Y < 15 and data.player1Y> 15: data.winner= "player2" if data.player2Y>40: data.time +=1 drop2Player(data) if data.player1Y < 15 and data.player2Y <15: 
data.winner = "tie" for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible:powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug:bug.onTimerFired(data) hitScaryBug(data) powerupTimerFired(data) def powerupTimerFired(data): for coconut in data.coconuts1: if data.pause1Drop == False: coconut.onTimerFired(data) hit2Player(data) for coconut in data.coconuts2: if data.pause2Drop == False: coconut.onTimerFired(data) if data.start1 != None: # to make powerups only active for set amount of time if abs(data.start1-data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2-data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def twoPlayerRedrawAll(canvas, data): #magic #s for placement on screen canvas.create_image(data.width/4, data.height/2, image=data.halfBackground) canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground) canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10) canvas.create_line(0,20, data.width, 20) for coconut in data.coconuts1: coconut.draw(canvas) for coconut in data.coconuts2: coconut.draw(canvas) drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) canvas.create_text(50,40, text = "Player 1",font = "Arial 15 bold", fill = "yellow") canvas.create_text(450,40, text = "Player 2",font = "Arial 15 bold", fill = "yellow") winner(canvas, data) drawHome(canvas, data) def winner(canvas, data): if data.winner== "player1": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it! 
Player 1", font = "Arial 23 bold", fill = "yellow") elif data.winner== "player2": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it! Player 2", font = "Arial 23 bold", fill = "yellow") elif data.winner== "tie": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "Tie! You Both Made it!", font = "Arial 23 bold", fill = "yellow") #################################### # editor mode #################################### def editorKeyPressed(event,data): if event.keysym == "r": init(data) def editorMousePressed(event, data): #check for click on button for your speed checkHome(event, data) if data.easyY-data.r<= event.y <= data.easyY +data.r: if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r: data.yourSpeed = "slow" data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.yourSpeed = "medium" data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r: data.yourSpeed = "fast" data.fast = data.click data.slow, data.medium = data.notClick, data.notClick checkMiddle(event, data) checkLast(event, data) def checkMiddle(event, data): #check for click on button for rain speed if data.medX-data.r<= event.y <= data.medX + data.r: if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r: data.rainSpeed = "drizzle" data.drizzle = data.click data.rain, data.thunderstorm = data.notClick, data.notClick if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.rainSpeed = "rain" data.rain = data.click data.drizzle, data.thunderstorm = data.notClick, data.notClick if 
data.hardX-2*data.r<= event.x<=data.hardX+2*data.r: data.rainSpeed = "thunderstorm" data.thunderstorm = data.click data.drizzle, data.rain = data.notClick, data.notClick def checkLast(event, data): #check for click on button for powerups if data.last-data.r<=event.y<= data.last+data.r: if data.easyY-2*data.r<= event.x<=data.easyY+2*data.r: data.powerUpsEditor = True data.yes, data.no = data.click, data.notClick if data.last-2*data.r<= event.x<=data.last+2*data.r: data.powerUpsEditor = False data.no, data.yes = data.click, data.notClick if data.enter == data.click: if data.enterX-data.r<=event.y<=data.enterX+data.r: if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.mode="levelCreated" def drawButtons(canvas, data): #makes each button data.font, data.fill = "Helvetica 13 bold", "yellow" canvas.create_text(data.medX,data.YST, text= "Your Speed:", font = data.font,fill =data.fill) canvas.create_image(data.easyX,data.easyY, image = data.slow) canvas.create_text(data.easyX,data.easyY, text="Slow", font = data.font) canvas.create_image(data.medX,data.easyY, image = data.medium) canvas.create_text(data.medX,data.easyY, text="Medium", font = data.font) canvas.create_image(data.hardX,data.easyY, image = data.fast) canvas.create_text(data.hardX,data.easyY, text="Fast",font = data.font) canvas.create_image(data.easyX,data.medX, image = data.drizzle) canvas.create_text(data.medX,data.RST, text= "Rain Speed:", font = data.font,fill =data.fill) canvas.create_text(data.easyX,data.medX, text="Drizzle",font = data.font) canvas.create_image(data.medX,data.medX, image = data.rain) canvas.create_text(data.medX,data.medX, text="Rain",font = data.font) canvas.create_image(data.hardX,data.medX, image = data.thunderstorm) canvas.create_text(data.hardX,data.medX, text="Heavy",font = data.font) canvas.create_text(data.medX,data.PUT, text= "PowerUps?", font = data.font,fill =data.fill) canvas.create_image(data.easyY,data.last, image = data.yes) canvas.create_text(data.easyY,data.last, 
text="Yes",font = data.font) canvas.create_image(data.last,data.last, image = data.no) canvas.create_text(data.last,data.last, text="No",font = data.font) changeEnter(canvas, data) def changeEnter(canvas, data): #makes it so the enter button respond to click if data.powerUpsEditor != None and data.yourSpeed != None and \ data.rainSpeed != None: data.enter = data.click canvas.create_image(data.medX,data.enterX, image = data.enter) canvas.create_text(data.medX,data.enterX, text="Enter",font = data.font) def editorTimerFired(data): data.editorTime += 1 if data.editorTime %2 ==0: rainDrop(data) for drop in data.editorDrops: drop.onTimerFired(data) def rainDrop(data): #background drops xPosition = random.randint(0,data.width) data.editorDrops.append(Coconuts(xPosition,0)) def editorRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_image(data.width/2, data.height/2, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width/2, data.S_P -10, text = "Edit Your Level!", font="Arial 23 bold", fill = "yellow") drawButtons(canvas, data) drawHome(canvas, data) #################################### # levelCreated mode #################################### def setEverything(data): #customizing game if data.yourSpeed == "slow": data.speed = 6 elif data.yourSpeed == "medium": data.speed = 10 elif data.yourSpeed == "fast": data.speed = 14 if data.rainSpeed == "thunderstorm": data.rSpeed = 7 elif data.rainSpeed == "rain": data.rSpeed = 10 elif data.rainSpeed == "drizzle": data.rSpeed = 13 def levelCoconutShot(data): #adding drops if data.levelEditorLives >0: if data.time%int(0.35*data.rSpeed) == 0: xPosition1 = random.randint(0,data.Player1Min-data.buffer) xPosition2 = random.randint(770, 870) xPosition3 = random.randint(220,770) data.coconuts.append(Coconuts(xPosition3,0)) data.coconuts.append(Coconuts(xPosition1,0)) data.coconuts.append(Coconuts(xPosition2,0)) if data.time % 
int(0.55*data.rSpeed) ==0: xPosition3 = random.randint(0, 220) xPosition5 = random.randint(220,770) data.coconuts.append(Coconuts(xPosition3,0)) data.coconuts.append(Coconuts(xPosition5,0)) if data.time % int(data.rSpeed) ==0: side = random.choice(data.sides) if side == "l": data.coconuts.append(Coconuts(3*data.width/8-20,0)) elif side =="r": data.coconuts.append(Coconuts(7*data.width/8+40,0)) xPosition4= random.randint(220,770) data.coconuts.append(Coconuts(xPosition4,0)) levelPowerUp(data) def levelPowerUp(data): # adding power-ups only if clicked yes if data.powerUpsEditor == True: if data.time % 20 == 0 and data.time%40 !=0: Position = random.choice(data.spotList) data.powerUps.append(PowerUps(Position,0)) if data.time%30 == 0: Position = random.choice(data.spotList) data.invincible.append(Invincible(Position,0)) if data.time %35==0: Position = random.choice(data.spotList) data.scaryBug.append(ScaryBug(Position,750)) def levelCreatedKeyPressed(event,data): if event.keysym == "r": init(data) if data.levelEditorLives>0: if (event.keysym == "Left") and data.cx>=317: data.cx -=(data.lane/2) elif(event.keysym == "Right") and data.cx<=740: data.cx +=(data.lane/2) def levelCreatedMousePressed(event, data): checkHome(event, data) def levelCreatedTimerFired(data): setEverything(data) if data.levelEditorLives>0: data.cy-=data.speed if data.cy < 15: data.level +=1 if data.cy>40: data.time +=1 if data.pauseDrops !=True: levelCoconutShot(data) if data.powerUpsEditor == False: for coconut in data.coconuts: coconut.onTimerFired(data) hit(data) if data.powerUpsEditor == True: for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) for coconut in data.coconuts: if data.pauseDrops == False:coconut.onTimerFired(data) if data.beInvincible == False: hit(data) if data.start != None: #to make powerups only active for set 
amount of time if abs(data.start-data.cy) >= 120: data.pauseDrops, data.beInvincible = False, False def levelCreatedRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_line(0,20, data.width, 20) for coconut in data.coconuts: coconut.draw(canvas) if data.powerUpsEditor == True: drawPowerups(canvas, data) canvas.create_image(data.cx, data.cy, image=data.ladyBug) canvas.create_text(data.width/6,100, text ="Total Lives: %d" %data.levelEditorLives, font = "Arial 20 bold", fill = "yellow") canvas.create_text(data.width/2,660, text ="""You lose a life for hitting a drop & don't get eaten!""", font = "Arial 15 bold", fill = "yellow") if data.levelEditorLives <=0: canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.deadScreen) canvas.create_text(data.width/2,data.height/4, text = "You Lose! Better Luck Next Time!", font = "Helvetica 23 bold", fill = "yellow") if data.level > 1: winEditor(canvas, data) drawHome(canvas, data) def winEditor(canvas, data): #screen for when you win canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it!", font = "Arial 23 bold", fill = "yellow") #################################### # AI Difficulty Mode #################################### def difficultyKeyPressed(event,data): if event.keysym == "r": init(data) def drawDifficulties(canvas, data): canvas.create_text(data.medX,data.AITY, text= "Computer Difficulty:", font="Arial 23 bold", fill = "yellow") canvas.create_image(data.easyX, data.easyY, image=data.slow) canvas.create_text(data.easyX,data.easyY, text="Easy") canvas.create_image(data.medX, data.easyY, image=data.medium) canvas.create_text(data.medX,data.easyY, text="Medium") canvas.create_image(data.hardX, 
data.easyY, image=data.fast) canvas.create_text(data.hardX,data.easyY, text="Hard") if data.difficulty !=None: data.enter = data.click canvas.create_image(data.medX, data.enterY, image=data.enter) canvas.create_text(data.medX,data.enterY, text="Enter") def difficultyMousePressed(event, data): #sets up buttons to customize checkHome(event, data) if data.easyY-data.r<= event.y <= data.easyY +data.r: if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r: data.difficulty = data.difS data.slow = data.click data.medium, data.fast = data.notClick, data.notClick if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.difficulty = data.difM data.medium = data.click data.slow, data.fast = data.notClick, data.notClick if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r: data.difficulty = data.difH data.fast = data.click data.slow, data.medium = data.notClick, data.notClick if data.enter == data.click: if data.enterY-data.r<=event.y<=data.enterY+data.r: if data.medX-2*data.r<= event.x<=data.medX+2*data.r: data.mode="AI" def difficultyTimerFired(data): # makes normal background rain data.editorTime += 1 if data.editorTime %2 ==0: rainDrop(data) for drop in data.editorDrops: drop.onTimerFired(data) def rainDrop(data): xPosition = random.randint(0,data.width) data.editorDrops.append(Coconuts(xPosition,0)) def difficultyRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_image(data.width/2, data.height/2, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) drawDifficulties(canvas, data) drawHome(canvas, data) #################################### # AI mode #################################### def hitAI1(data, distance): for coconut in data.coconutsAI1: # so AI switches by itself if (data.player1Y-data.r - coconut.y<=distance) and \ data.switchOnProgress == False: if coconut.x>=data.player1X-data.r and \ coconut.x<=data.player1X+data.r or AISwitchBug(data,distance)==True: testInt = random.randint(0,9) # to 
have different levels of difficulty if testInt<= data.difficulty: data.switchOnProgress= True if data.player1X == 150: data.player1X = 340 else: data.player1X = 150 data.switchOnProgress= False if coconut.y>=data.player1Y-data.r and coconut.y<=data.player1Y+data.r: if coconut.x>=data.player1X-data.r and \ coconut.x<=data.player1X+data.r: data.player1Y+=50 data.coconutsAI1.remove(coconut) def AISwitchBug(data, distance): #AI to move for spider for scaryBug in data.scaryBug: if (data.player1Y-data.r - scaryBug.y<=distance) and \ data.switchOnProgress == False: if scaryBug.x>=data.player1X-data.r and \ scaryBug.x<=data.player1X+data.r: return True def hitAI2(data, distance): # check if human controlled player hits drops for coconut in data.coconutsAI2: if coconut.y>=data.player2Y-data.r and coconut.y<=data.player2Y+data.r: if coconut.x>=data.player2X-data.r and \ coconut.x<=data.player2X+data.r: data.player2Y+=50 data.coconutsAI2.remove(coconut) def coconutShotAI(data): if data.winner ==None: # randomize position of drops off of tree if data.time%15==0: xPosition1 = random.randint(0,385) if abs(xPosition1 - 100)>40 and abs(xPosition1 - 360)>40: if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition1,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition1 +410,0)) if data.time%8 ==0: xPosition2 = random.randint(0,80) xPosition3 = random.randint(364, 385) if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(xPosition2,0)) data.coconutsAI1.append(Coconuts(xPosition3,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(xPosition2+410,0)) data.coconutsAI2.append(Coconuts(xPosition3+410,0)) addExtraCoconut(data) addPowerUpsAI(data) def addExtraCoconut(data): #adds drops to edges of trees if data.time % (18) ==0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: data.coconutsAI1.append(Coconuts(140,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(540,0)) elif side =="r": if 
data.pause1Drop != True: data.coconutsAI1.append(Coconuts(344,0)) if data.pause2Drop != True: data.coconutsAI2.append(Coconuts(755,0)) if data.time % 37 == 0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: data.powerUps.append(PowerUps(140,0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(550,0)) elif side =="r": if data.pause1Drop != True: data.powerUps.append(PowerUps(344,0)) if data.pause2Drop != True: data.powerUps.append(PowerUps(755,0)) def addPowerUpsAI(data): #randomly add powerups on tree if data.time%33 == 0: side = random.choice(data.sides) if side == "l": if data.pause1Drop != True: data.invincible.append(Invincible(140,0)) if data.pause2Drop != True: data.invincible.append(Invincible(550,0)) elif side =="r": if data.pause1Drop != True: data.invincible.append(Invincible(344,0)) if data.pause2Drop != True: data.invincible.append(Invincible(755,0)) if data.time %66==0: side = random.choice(data.sides) if side == "l": data.scaryBug.append(ScaryBug(140,750)) data.scaryBug.append(ScaryBug(550,750)) elif side =="r": data.scaryBug.append(ScaryBug(344,750)) data.scaryBug.append(ScaryBug(750,750)) def AIKeyPressed(event,data): if event.keysym == "r": init(data) if data.winner==None: if (event.keysym == "Left") and data.onLeft1==False: data.onLeft1 = True data.player2X = 550 elif(event.keysym == "Right") and data.onLeft1== True: data.onLeft1 = False data.player2X = 750 def AIMousePressed(event, data): checkHome(event, data) def AITimerFired(data): if data.winner == None: #want to check hit twice (before & after elements move) if data.Invincible1 == False:hitAI1(data, 31) if data.Invincible2 == True: pass elif data.Invincible2 == False:hitAI2(data, 31) for coconut in data.coconutsAI1: if data.pause1Drop == False:coconut.onTimerFired(data) for coconut in data.coconutsAI2: if data.pause2Drop == False:coconut.onTimerFired(data) # second check if data.Invincible1 == False:hitAI1(data,13) if data.Invincible2 == True:pass elif 
data.Invincible2 == False:hitAI2(data,13) data.player1Y-=data.speedAI #establishing winer if data.player1Y < 15 and data.player2Y >15: data.winner= "player1" if data.player1Y>40: data.time +=1 coconutShotAI(data) data.player2Y-=data.speedAI if data.player2Y < 15 and data.player1Y> 15: data.winner= "player2" if data.player2Y>40: data.time +=1 coconutShotAI(data) if data.player1Y < 15 and data.player2Y <15: data.winner = "tie" for powerUp in data.powerUps: powerUp.onTimerFired(data) hitPause(data) powerUpAITimerFired(data) def powerUpAITimerFired(data): #moves both sides symmetrically for powerUp in data.invincible: powerUp.onTimerFired(data) hitInvincible(data) for bug in data.scaryBug: bug.onTimerFired(data) hitScaryBug(data) if data.start1 != None: if abs(data.start1-data.player1Y) >= 120: data.pause1Drop = False data.Invincible1 = False if data.start2 != None: if abs(data.start2-data.player2Y) >= 120: data.pause2Drop = False data.Invincible2 = False def AIRedrawAll(canvas, data): canvas.create_image(data.width/4, data.height/2, image=data.halfBackground) canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground) canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10) canvas.create_line(0,20, data.width, 20) for coconut in data.coconutsAI1: coconut.draw(canvas) for coconut in data.coconutsAI2: coconut.draw(canvas) canvas.create_text(50,40, text = "Computer",font = "Arial 15 bold", fill = "yellow") canvas.create_text(450,40, text = "Player 1",font = "Arial 15 bold", fill = "yellow") drawPowerups(canvas, data) canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug) canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug) AIWinner(canvas, data) drawHome(canvas, data) def AIWinner(canvas, data): if data.winner== "player1": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, 
image=data.winBug) canvas.create_text(data.width/2,100, text = "The Computer Won :(", font = "Arial 23 bold", fill = "yellow") elif data.winner== "player2": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "You Made it! You Won!", font = "Arial 23 bold", fill = "yellow") elif data.winner== "tie": canvas.create_rectangle(0,0, data.width, data.height, fill = "black") canvas.create_image(data.width/2, data.height/2, image=data.winScreen) canvas.create_image(300, 320, image=data.winBug) canvas.create_text(data.width/2,100, text = "Tie! You Both Made it!", font = "Arial 23 bold", fill = "yellow") #################################### # ScoreBoard mode #################################### def scoreboardKeyPressed(event, data): if event.keysym == "r": init(data) def scoreboardMousePressed(event, data): checkHome(event, data) def scoreboardTimerFired(data): difficultyTimerFired(data) def scoreboardRedrawAll(canvas, data): canvas.create_image(data.width/2, data.height/2, image=data.background) canvas.create_image(data.width/2, data.tbgY, image=data.tbg) for drop in data.editorDrops: drop.draw(canvas) canvas.create_text(data.width/2, data.txtTScore, text="Top Scores!", font = "Arial 30 bold", fill = "yellow") canvas.create_text(data.width/2, data.S_P, text="Score_Player", font = "Arial 20 bold", fill = "yellow") drawHome(canvas, data) #reads file data.savedScores data.savedScores=readFile("score.txt") score=data.savedScores.splitlines() scores=[] for line in score: scores.append(line.split(",")) #sorts scores to find top 5 scores = sorted(scores, key = lambda x: int(x[0])) top5 = scores[-data.numScores:] top5.reverse() for i in range(len(top5)): canvas.create_text(data.width/2, data.scoreShift+(i*50), text = top5[i], font = "Arial 18 bold", fill = "yellow") #################################### 
# help mode #################################### def helpKeyPressed(event, data): if event.keysym == "r": init(data) def helpMousePressed(event, data): checkHome(event, data) def helpTimerFired(data): difficultyTimerFired(data) def helpRedrawAll(canvas, data): canvas.create_image(data.width/2, data.helpY, image=data.helpScreen) for drop in data.editorDrops: drop.draw(canvas) drawHome(canvas, data) ####################################### # use the run function as-is from notes ####################################### def run(width=15000, height=25000): def redrawAllWrapper(canvas, data): canvas.delete(ALL) redrawAll(canvas, data) canvas.update() def mousePressedWrapper(event, canvas, data): mousePressed(event, data) redrawAllWrapper(canvas, data) def keyPressedWrapper(event, canvas, data): keyPressed(event, data) redrawAllWrapper(canvas, data) def timerFiredWrapper(canvas, data): timerFired(data) redrawAllWrapper(canvas, data) # pause, then call timerFired again canvas.after(data.timerDelay, timerFiredWrapper, canvas, data) # Set up data and call init class Struct(object): pass data = Struct() data.width = width data.height = height data.timerDelay = 100 # milliseconds # create the root and the canvas root = Tk() init(data) canvas = Canvas(root, width=data.width, height=data.height) canvas.pack() # set up events root.bind("<Button-1>", lambda event: mousePressedWrapper(event, canvas, data)) root.bind("<Key>", lambda event: keyPressedWrapper(event, canvas, data)) timerFiredWrapper(canvas, data) # and launch the app root.mainloop() # blocks until window is closed print("bye!") run(1000, 1000)
normal
{ "blob_id": "c893095be88636e6cb06eb3b939d8106fbb7a8ca", "index": 470, "step-1": "<mask token>\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 = data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\n<mask token>\n\n\ndef 
init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\n<mask token>\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\n<mask token>\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\n<mask token>\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) 
Scoreboard\n\n \n \"\"\"\n , font='Arial 14 bold', fill='yellow')\n splashScreenButtons(canvas, data)\n\n\ndef writeFile(path, contents):\n with open(path, 'wt') as f:\n f.write(contents)\n\n\ndef readFile(path):\n with open(path, 'rt') as f:\n return f.read()\n\n\nclass Coconuts(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.r = 9\n self.fill = 'deep sky blue'\n self.speed = 30\n self.outline = 'blue'\n\n def draw(self, canvas):\n canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,\n self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=\n self.fill, outline=self.outline, width=3)\n\n def onTimerFired(self, data):\n self.y += self.speed\n\n\ndef hit(data):\n for coconut in data.coconuts:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:\n if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +\n data.r):\n data.cy += data.hitPenalty\n if data.mode == 'levelCreated':\n data.lives -= 1\n elif data.hit == False and data.level < data.levelMax:\n data.score -= data.level\n data.coconuts.remove(coconut)\n if data.mode == 'levelCreated':\n data.levelEditorLives -= 1\n\n\ndef hit2Player(data):\n if data.mode == '2Player':\n if data.Invincible1 == False:\n for coconut in data.coconuts1:\n if (coconut.y >= data.player1Y - data.r and coconut.y <= \n data.player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <=\n data.player1X + data.r):\n data.player1Y += data.hitPenalty\n data.coconuts1.remove(coconut)\n if data.Invincible2 == False:\n for coconut in data.coconuts2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= \n data.player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <=\n data.player2X + data.r):\n data.player2Y += data.hitPenalty\n data.coconuts2.remove(coconut)\n\n\nclass PowerUps(Coconuts):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, 
data):\n canvas.create_image(self.x, self.y, image=data.hourGlass)\n\n\ndef hitPause(data):\n for powerUp in data.powerUps:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.pauseDrops = True\n data.start = data.cy\n data.powerUps.remove(powerUp)\n elif data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.pause1Drop = True\n data.start1 = data.player1Y\n data.powerUps.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.pause2Drop = True\n data.start2 = data.player2Y\n data.powerUps.remove(powerUp)\n\n\nclass Invincible(PowerUps):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.umbrella)\n\n\ndef hitInvincible(data):\n for powerUp in data.invincible:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.beInvincible = True\n data.start = data.cy\n data.invincible.remove(powerUp)\n if data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.Invincible1 = True\n data.start1 = data.player1Y\n data.invincible.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + 
data.r):\n data.Invincible2 = True\n data.start2 = data.player2Y\n data.invincible.remove(powerUp)\n\n\nclass ScaryBug(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.speed = 25\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.spider)\n\n def onTimerFired(self, data):\n if data.mode == '2Player' or data.mode == 'AI':\n self.speed = 35\n self.y -= self.speed\n if (data.mode == '1Player' or data.mode == 'levelCreated' and data.\n time % 8 == 0):\n side = random.choice(data.sides)\n if side == 'l':\n if self.x - data.lane >= data.Player1Min:\n self.x -= data.lane\n else:\n self.x += data.lane\n elif side == 'r':\n if self.x + data.lane <= data.Player1Max:\n self.x += data.lane\n else:\n self.x -= data.lane\n\n\n<mask token>\n\n\ndef drawPowerups(canvas, data):\n for bug in data.scaryBug:\n bug.draw(canvas, data)\n for powerUp in data.powerUps:\n powerUp.draw(canvas, data)\n for powerUp in data.invincible:\n powerUp.draw(canvas, data)\n\n\ndef drawHome(canvas, data):\n canvas.create_image(data.homeX, data.homeY, image=data.home)\n\n\n<mask token>\n\n\ndef powerUpCoconutShot(data):\n if data.time % 60 == 0 and data.time % 120 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 50 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 100 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef madeIt(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 70, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,\n font='Arial 15 bold', fill='yellow')\n 
canvas.create_text(data.width / 2, 375, text=\n 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow')\n canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, \n 450, fill='white')\n canvas.create_text(data.width / 2, 425, text=data.name)\n\n\ndef drop2Player(data):\n if data.winner == None and data.pauseDrops == False:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 12 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(755, 0))\n powerupDrop2Player(data)\n\n\ndef powerupDrop2Player(data):\n if data.time % 45 == 0 and data.time % 90 != 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n if data.time % 60 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 90 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n 
data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(540, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(755, 750))\n\n\n<mask token>\n\n\ndef twoPlayerMousePressed(event, data):\n checkHome(event, data)\n\n\ndef twoPlayerTimerFired(data):\n if data.winner == None:\n data.player1Y -= data.speed\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n drop2Player(data)\n data.player2Y -= data.speed\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n drop2Player(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n powerupTimerFired(data)\n\n\n<mask token>\n\n\ndef winner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 1', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
Player 2', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef editorKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef editorMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.yourSpeed = 'slow'\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.yourSpeed = 'medium'\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.yourSpeed = 'fast'\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n checkMiddle(event, data)\n checkLast(event, data)\n\n\ndef checkMiddle(event, data):\n if data.medX - data.r <= event.y <= data.medX + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.rainSpeed = 'drizzle'\n data.drizzle = data.click\n data.rain, data.thunderstorm = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.rainSpeed = 'rain'\n data.rain = data.click\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.rainSpeed = 'thunderstorm'\n data.thunderstorm = data.click\n data.drizzle, data.rain = data.notClick, data.notClick\n\n\n<mask token>\n\n\ndef changeEnter(canvas, data):\n if (data.powerUpsEditor != None and data.yourSpeed != None and data.\n rainSpeed != None):\n data.enter = 
data.click\n canvas.create_image(data.medX, data.enterX, image=data.enter)\n canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)\n\n\ndef editorTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\n<mask token>\n\n\ndef setEverything(data):\n if data.yourSpeed == 'slow':\n data.speed = 6\n elif data.yourSpeed == 'medium':\n data.speed = 10\n elif data.yourSpeed == 'fast':\n data.speed = 14\n if data.rainSpeed == 'thunderstorm':\n data.rSpeed = 7\n elif data.rainSpeed == 'rain':\n data.rSpeed = 10\n elif data.rainSpeed == 'drizzle':\n data.rSpeed = 13\n\n\n<mask token>\n\n\ndef levelPowerUp(data):\n if data.powerUpsEditor == True:\n if data.time % 20 == 0 and data.time % 40 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 30 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 35 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef levelCreatedMousePressed(event, data):\n checkHome(event, data)\n\n\ndef levelCreatedTimerFired(data):\n setEverything(data)\n if data.levelEditorLives > 0:\n data.cy -= data.speed\n if data.cy < 15:\n data.level += 1\n if data.cy > 40:\n data.time += 1\n if data.pauseDrops != True:\n levelCoconutShot(data)\n if data.powerUpsEditor == False:\n for coconut in data.coconuts:\n coconut.onTimerFired(data)\n hit(data)\n if data.powerUpsEditor == True:\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n for coconut in data.coconuts:\n if 
data.pauseDrops == False:\n coconut.onTimerFired(data)\n if data.beInvincible == False:\n hit(data)\n if data.start != None:\n if abs(data.start - data.cy) >= 120:\n data.pauseDrops, data.beInvincible = False, False\n\n\n<mask token>\n\n\ndef winEditor(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef difficultyMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.difficulty = data.difS\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.difficulty = data.difM\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.difficulty = data.difH\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n if data.enter == data.click:\n if data.enterY - data.r <= event.y <= data.enterY + data.r:\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.mode = 'AI'\n\n\ndef difficultyTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef difficultyRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawDifficulties(canvas, data)\n 
drawHome(canvas, data)\n\n\ndef hitAI1(data, distance):\n for coconut in data.coconutsAI1:\n if (data.player1Y - data.r - coconut.y <= distance and data.\n switchOnProgress == False):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r or AISwitchBug(data, distance) == True):\n testInt = random.randint(0, 9)\n if testInt <= data.difficulty:\n data.switchOnProgress = True\n if data.player1X == 150:\n data.player1X = 340\n else:\n data.player1X = 150\n data.switchOnProgress = False\n if (coconut.y >= data.player1Y - data.r and coconut.y <= data.\n player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r):\n data.player1Y += 50\n data.coconutsAI1.remove(coconut)\n\n\n<mask token>\n\n\ndef hitAI2(data, distance):\n for coconut in data.coconutsAI2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= data.\n player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <= data.\n player2X + data.r):\n data.player2Y += 50\n data.coconutsAI2.remove(coconut)\n\n\ndef coconutShotAI(data):\n if data.winner == None:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 8 == 0:\n xPosition2 = random.randint(0, 80)\n xPosition3 = random.randint(364, 385)\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition2, 0))\n data.coconutsAI1.append(Coconuts(xPosition3, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))\n data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))\n addExtraCoconut(data)\n addPowerUpsAI(data)\n\n\n<mask token>\n\n\ndef addPowerUpsAI(data):\n if data.time % 33 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != 
True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 66 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(550, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(750, 750))\n\n\n<mask token>\n\n\ndef AITimerFired(data):\n if data.winner == None:\n if data.Invincible1 == False:\n hitAI1(data, 31)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 31)\n for coconut in data.coconutsAI1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n for coconut in data.coconutsAI2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.Invincible1 == False:\n hitAI1(data, 13)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 13)\n data.player1Y -= data.speedAI\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n coconutShotAI(data)\n data.player2Y -= data.speedAI\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n coconutShotAI(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n powerUpAITimerFired(data)\n\n\n<mask token>\n\n\ndef AIWinner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='The Computer Won :(',\n font='Arial 
23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! You Won!', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in 
data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 
= data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\n<mask token>\n\n\ndef init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\ndef mousePressed(event, data):\n if data.mode == 'splashScreen':\n splashScreenMousePressed(event, data)\n elif data.mode == '1Player':\n playerMousePressed(event, data)\n elif data.mode == '2Player':\n twoPlayerMousePressed(event, data)\n elif data.mode == 'editor':\n editorMousePressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedMousePressed(event, data)\n elif data.mode == 'AI':\n AIMousePressed(event, data)\n elif data.mode == 'difficulty':\n difficultyMousePressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardMousePressed(event, data)\n elif data.mode == 'help':\n helpMousePressed(event, data)\n\n\n<mask token>\n\n\ndef timerFired(data):\n if data.mode 
== 'splashScreen':\n splashScreenTimerFired(data)\n elif data.mode == '1Player':\n playerTimerFired(data)\n elif data.mode == '2Player':\n twoPlayerTimerFired(data)\n elif data.mode == 'editor':\n editorTimerFired(data)\n elif data.mode == 'levelCreated':\n levelCreatedTimerFired(data)\n elif data.mode == 'AI':\n AITimerFired(data)\n elif data.mode == 'difficulty':\n difficultyTimerFired(data)\n elif data.mode == 'scoreboard':\n scoreboardTimerFired(data)\n elif data.mode == 'help':\n helpTimerFired(data)\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\n<mask token>\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\ndef splashScreenButtons(canvas, data):\n canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1)\n canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2)\n canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3)\n canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4)\n canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5)\n canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6\n )\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n 
data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) Scoreboard\n\n \n \"\"\"\n , font='Arial 14 bold', fill='yellow')\n splashScreenButtons(canvas, data)\n\n\ndef writeFile(path, contents):\n with open(path, 'wt') as f:\n f.write(contents)\n\n\ndef readFile(path):\n with open(path, 'rt') as f:\n return f.read()\n\n\nclass Coconuts(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.r = 9\n self.fill = 'deep sky blue'\n self.speed = 30\n self.outline = 'blue'\n\n def draw(self, canvas):\n canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,\n self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=\n self.fill, outline=self.outline, width=3)\n\n def onTimerFired(self, data):\n self.y += self.speed\n\n\ndef hit(data):\n for coconut in data.coconuts:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:\n if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +\n data.r):\n data.cy += data.hitPenalty\n if data.mode == 'levelCreated':\n data.lives -= 1\n elif data.hit == False and data.level < data.levelMax:\n data.score -= data.level\n data.coconuts.remove(coconut)\n if data.mode == 'levelCreated':\n data.levelEditorLives -= 1\n\n\ndef hit2Player(data):\n if data.mode == '2Player':\n if data.Invincible1 == False:\n for coconut in data.coconuts1:\n if (coconut.y >= data.player1Y - data.r and coconut.y <= \n data.player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <=\n data.player1X + data.r):\n 
data.player1Y += data.hitPenalty\n data.coconuts1.remove(coconut)\n if data.Invincible2 == False:\n for coconut in data.coconuts2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= \n data.player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <=\n data.player2X + data.r):\n data.player2Y += data.hitPenalty\n data.coconuts2.remove(coconut)\n\n\nclass PowerUps(Coconuts):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.hourGlass)\n\n\ndef hitPause(data):\n for powerUp in data.powerUps:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.pauseDrops = True\n data.start = data.cy\n data.powerUps.remove(powerUp)\n elif data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.pause1Drop = True\n data.start1 = data.player1Y\n data.powerUps.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.pause2Drop = True\n data.start2 = data.player2Y\n data.powerUps.remove(powerUp)\n\n\nclass Invincible(PowerUps):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.umbrella)\n\n\ndef hitInvincible(data):\n for powerUp in data.invincible:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.beInvincible = True\n data.start = data.cy\n 
data.invincible.remove(powerUp)\n if data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.Invincible1 = True\n data.start1 = data.player1Y\n data.invincible.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.Invincible2 = True\n data.start2 = data.player2Y\n data.invincible.remove(powerUp)\n\n\nclass ScaryBug(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.speed = 25\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.spider)\n\n def onTimerFired(self, data):\n if data.mode == '2Player' or data.mode == 'AI':\n self.speed = 35\n self.y -= self.speed\n if (data.mode == '1Player' or data.mode == 'levelCreated' and data.\n time % 8 == 0):\n side = random.choice(data.sides)\n if side == 'l':\n if self.x - data.lane >= data.Player1Min:\n self.x -= data.lane\n else:\n self.x += data.lane\n elif side == 'r':\n if self.x + data.lane <= data.Player1Max:\n self.x += data.lane\n else:\n self.x -= data.lane\n\n\ndef hitScaryBug(data):\n for bug in data.scaryBug:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 *\n data.r):\n if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx + \n 1.5 * data.r):\n data.hit = True\n data.lives = 0\n data.levelEditorLives = 0\n if data.mode == '2Player' or data.mode == 'AI':\n if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y +\n data.r):\n if (bug.x >= data.player1X - data.r and bug.x <= data.\n player1X + data.r):\n data.winner = 'player2'\n if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y +\n data.r):\n if (bug.x >= data.player2X - data.r and bug.x <= data.\n player2X + 
data.r):\n data.winner = 'player1'\n\n\ndef drawPowerups(canvas, data):\n for bug in data.scaryBug:\n bug.draw(canvas, data)\n for powerUp in data.powerUps:\n powerUp.draw(canvas, data)\n for powerUp in data.invincible:\n powerUp.draw(canvas, data)\n\n\ndef drawHome(canvas, data):\n canvas.create_image(data.homeX, data.homeY, image=data.home)\n\n\n<mask token>\n\n\ndef powerUpCoconutShot(data):\n if data.time % 60 == 0 and data.time % 120 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 50 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 100 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef playerRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(2 * data.width / 3, 660, text=\n \"\"\"The greater the level, the more points get\n added to your score!\"\"\"\n , font='Arial 15 bold', fill='yellow')\n if data.hit == True:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n canvas.create_text(data.width / 2, 280, text='Score: %d' % data.\n score, font='Arial 13 bold', fill='yellow')\n if data.level >= 8:\n madeIt(canvas, data)\n drawHome(canvas, data)\n\n\ndef madeIt(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 70, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,\n font='Arial 15 bold', fill='yellow')\n canvas.create_text(data.width / 2, 375, text=\n 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow')\n canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, \n 450, fill='white')\n canvas.create_text(data.width / 2, 425, text=data.name)\n\n\ndef drop2Player(data):\n if data.winner == None and data.pauseDrops == False:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 12 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(755, 0))\n powerupDrop2Player(data)\n\n\ndef powerupDrop2Player(data):\n if data.time % 45 == 0 and data.time % 90 != 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n 
data.powerUps.append(PowerUps(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n if data.time % 60 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 90 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(540, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(755, 750))\n\n\n<mask token>\n\n\ndef twoPlayerMousePressed(event, data):\n checkHome(event, data)\n\n\ndef twoPlayerTimerFired(data):\n if data.winner == None:\n data.player1Y -= data.speed\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n drop2Player(data)\n data.player2Y -= data.speed\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n drop2Player(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n powerupTimerFired(data)\n\n\ndef powerupTimerFired(data):\n for coconut in data.coconuts1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n hit2Player(data)\n for coconut in data.coconuts2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n 
data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef twoPlayerRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts1:\n coconut.draw(canvas)\n for coconut in data.coconuts2:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill\n ='yellow')\n winner(canvas, data)\n drawHome(canvas, data)\n\n\ndef winner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 1', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
Player 2', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef editorKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef editorMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.yourSpeed = 'slow'\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.yourSpeed = 'medium'\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.yourSpeed = 'fast'\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n checkMiddle(event, data)\n checkLast(event, data)\n\n\ndef checkMiddle(event, data):\n if data.medX - data.r <= event.y <= data.medX + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.rainSpeed = 'drizzle'\n data.drizzle = data.click\n data.rain, data.thunderstorm = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.rainSpeed = 'rain'\n data.rain = data.click\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.rainSpeed = 'thunderstorm'\n data.thunderstorm = data.click\n data.drizzle, data.rain = data.notClick, data.notClick\n\n\n<mask token>\n\n\ndef drawButtons(canvas, data):\n data.font, data.fill = 'Helvetica 13 bold', 'yellow'\n canvas.create_text(data.medX, data.YST, text='Your Speed:', 
font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyX, data.easyY, image=data.slow)\n canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font)\n canvas.create_image(data.medX, data.easyY, image=data.medium)\n canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font)\n canvas.create_image(data.hardX, data.easyY, image=data.fast)\n canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font)\n canvas.create_image(data.easyX, data.medX, image=data.drizzle)\n canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data.\n font, fill=data.fill)\n canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font)\n canvas.create_image(data.medX, data.medX, image=data.rain)\n canvas.create_text(data.medX, data.medX, text='Rain', font=data.font)\n canvas.create_image(data.hardX, data.medX, image=data.thunderstorm)\n canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font)\n canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyY, data.last, image=data.yes)\n canvas.create_text(data.easyY, data.last, text='Yes', font=data.font)\n canvas.create_image(data.last, data.last, image=data.no)\n canvas.create_text(data.last, data.last, text='No', font=data.font)\n changeEnter(canvas, data)\n\n\ndef changeEnter(canvas, data):\n if (data.powerUpsEditor != None and data.yourSpeed != None and data.\n rainSpeed != None):\n data.enter = data.click\n canvas.create_image(data.medX, data.enterX, image=data.enter)\n canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)\n\n\ndef editorTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef editorRedrawAll(canvas, data):\n canvas.create_image(data.width / 
2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.S_P - 10, text=\n 'Edit Your Level!', font='Arial 23 bold', fill='yellow')\n drawButtons(canvas, data)\n drawHome(canvas, data)\n\n\ndef setEverything(data):\n if data.yourSpeed == 'slow':\n data.speed = 6\n elif data.yourSpeed == 'medium':\n data.speed = 10\n elif data.yourSpeed == 'fast':\n data.speed = 14\n if data.rainSpeed == 'thunderstorm':\n data.rSpeed = 7\n elif data.rainSpeed == 'rain':\n data.rSpeed = 10\n elif data.rainSpeed == 'drizzle':\n data.rSpeed = 13\n\n\n<mask token>\n\n\ndef levelPowerUp(data):\n if data.powerUpsEditor == True:\n if data.time % 20 == 0 and data.time % 40 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 30 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 35 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef levelCreatedMousePressed(event, data):\n checkHome(event, data)\n\n\ndef levelCreatedTimerFired(data):\n setEverything(data)\n if data.levelEditorLives > 0:\n data.cy -= data.speed\n if data.cy < 15:\n data.level += 1\n if data.cy > 40:\n data.time += 1\n if data.pauseDrops != True:\n levelCoconutShot(data)\n if data.powerUpsEditor == False:\n for coconut in data.coconuts:\n coconut.onTimerFired(data)\n hit(data)\n if data.powerUpsEditor == True:\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n for coconut in data.coconuts:\n if data.pauseDrops == False:\n coconut.onTimerFired(data)\n if data.beInvincible == False:\n hit(data)\n if 
data.start != None:\n if abs(data.start - data.cy) >= 120:\n data.pauseDrops, data.beInvincible = False, False\n\n\ndef levelCreatedRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n if data.powerUpsEditor == True:\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 100, text='Total Lives: %d' % data.\n levelEditorLives, font='Arial 20 bold', fill='yellow')\n canvas.create_text(data.width / 2, 660, text=\n \"\"\"You lose a life for hitting a drop\n & don't get eaten!\"\"\",\n font='Arial 15 bold', fill='yellow')\n if data.levelEditorLives <= 0:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n if data.level > 1:\n winEditor(canvas, data)\n drawHome(canvas, data)\n\n\ndef winEditor(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef difficultyMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.difficulty = data.difS\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.difficulty = data.difM\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.difficulty = data.difH\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n if data.enter == data.click:\n if data.enterY - data.r <= event.y <= data.enterY + data.r:\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.mode = 'AI'\n\n\ndef difficultyTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef difficultyRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawDifficulties(canvas, data)\n drawHome(canvas, data)\n\n\ndef hitAI1(data, distance):\n for coconut in data.coconutsAI1:\n if 
(data.player1Y - data.r - coconut.y <= distance and data.\n switchOnProgress == False):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r or AISwitchBug(data, distance) == True):\n testInt = random.randint(0, 9)\n if testInt <= data.difficulty:\n data.switchOnProgress = True\n if data.player1X == 150:\n data.player1X = 340\n else:\n data.player1X = 150\n data.switchOnProgress = False\n if (coconut.y >= data.player1Y - data.r and coconut.y <= data.\n player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r):\n data.player1Y += 50\n data.coconutsAI1.remove(coconut)\n\n\ndef AISwitchBug(data, distance):\n for scaryBug in data.scaryBug:\n if (data.player1Y - data.r - scaryBug.y <= distance and data.\n switchOnProgress == False):\n if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data\n .player1X + data.r):\n return True\n\n\ndef hitAI2(data, distance):\n for coconut in data.coconutsAI2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= data.\n player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <= data.\n player2X + data.r):\n data.player2Y += 50\n data.coconutsAI2.remove(coconut)\n\n\ndef coconutShotAI(data):\n if data.winner == None:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 8 == 0:\n xPosition2 = random.randint(0, 80)\n xPosition3 = random.randint(364, 385)\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition2, 0))\n data.coconutsAI1.append(Coconuts(xPosition3, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))\n data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))\n addExtraCoconut(data)\n 
addPowerUpsAI(data)\n\n\n<mask token>\n\n\ndef addPowerUpsAI(data):\n if data.time % 33 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 66 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(550, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(750, 750))\n\n\n<mask token>\n\n\ndef AITimerFired(data):\n if data.winner == None:\n if data.Invincible1 == False:\n hitAI1(data, 31)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 31)\n for coconut in data.coconutsAI1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n for coconut in data.coconutsAI2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.Invincible1 == False:\n hitAI1(data, 13)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 13)\n data.player1Y -= data.speedAI\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n coconutShotAI(data)\n data.player2Y -= data.speedAI\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n coconutShotAI(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n powerUpAITimerFired(data)\n\n\ndef powerUpAITimerFired(data):\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n if 
data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\n<mask token>\n\n\ndef AIWinner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='The Computer Won :(',\n font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! You Won!', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! 
You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\ndef helpKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\n<mask token>\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class 
Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef init(data):\n data.score = 0\n data.mode = 'splashScreen'\n data.timerDelay = 100\n data.height = 800\n data.width = 800\n data.speed = 10\n data.speedAI = 12\n data.speedAI2 = 12\n data.switchOnProgress = False\n data.r = 25\n data.cx = 280\n data.cy = 750\n data.onLeft1, data.onLeft2 = True, True\n data.win = False\n data.coconuts = []\n data.powerUps = []\n data.coconuts1 = []\n data.coconuts2 = []\n data.coconutsAI1 = []\n data.coconutsAI2 = []\n data.invincible = []\n data.pauseDrops = False\n data.pause1Drop = False\n data.pause2Drop = False\n init1(data)\n\n\n<mask token>\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 = 
data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\n<mask token>\n\n\ndef init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\ndef mousePressed(event, data):\n if data.mode == 'splashScreen':\n splashScreenMousePressed(event, data)\n elif data.mode == '1Player':\n playerMousePressed(event, data)\n elif data.mode == '2Player':\n twoPlayerMousePressed(event, data)\n elif data.mode == 'editor':\n editorMousePressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedMousePressed(event, data)\n elif data.mode == 'AI':\n AIMousePressed(event, data)\n elif data.mode == 'difficulty':\n difficultyMousePressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardMousePressed(event, data)\n elif data.mode == 'help':\n helpMousePressed(event, data)\n\n\n<mask token>\n\n\ndef timerFired(data):\n if data.mode == 
'splashScreen':\n splashScreenTimerFired(data)\n elif data.mode == '1Player':\n playerTimerFired(data)\n elif data.mode == '2Player':\n twoPlayerTimerFired(data)\n elif data.mode == 'editor':\n editorTimerFired(data)\n elif data.mode == 'levelCreated':\n levelCreatedTimerFired(data)\n elif data.mode == 'AI':\n AITimerFired(data)\n elif data.mode == 'difficulty':\n difficultyTimerFired(data)\n elif data.mode == 'scoreboard':\n scoreboardTimerFired(data)\n elif data.mode == 'help':\n helpTimerFired(data)\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\n<mask token>\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\ndef splashScreenButtons(canvas, data):\n canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1)\n canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2)\n canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3)\n canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4)\n canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5)\n canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6\n )\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n 
data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) Scoreboard\n\n \n \"\"\"\n , font='Arial 14 bold', fill='yellow')\n splashScreenButtons(canvas, data)\n\n\ndef writeFile(path, contents):\n with open(path, 'wt') as f:\n f.write(contents)\n\n\ndef readFile(path):\n with open(path, 'rt') as f:\n return f.read()\n\n\nclass Coconuts(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.r = 9\n self.fill = 'deep sky blue'\n self.speed = 30\n self.outline = 'blue'\n\n def draw(self, canvas):\n canvas.create_polygon(self.x, self.y - 2 * self.r, self.x - self.r,\n self.y, self.x, self.y + self.r, self.x + self.r, self.y, fill=\n self.fill, outline=self.outline, width=3)\n\n def onTimerFired(self, data):\n self.y += self.speed\n\n\ndef hit(data):\n for coconut in data.coconuts:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if coconut.y >= data.cy - data.r and coconut.y <= data.cy + data.r:\n if (coconut.x >= data.cx - data.r and coconut.x <= data.cx +\n data.r):\n data.cy += data.hitPenalty\n if data.mode == 'levelCreated':\n data.lives -= 1\n elif data.hit == False and data.level < data.levelMax:\n data.score -= data.level\n data.coconuts.remove(coconut)\n if data.mode == 'levelCreated':\n data.levelEditorLives -= 1\n\n\ndef hit2Player(data):\n if data.mode == '2Player':\n if data.Invincible1 == False:\n for coconut in data.coconuts1:\n if (coconut.y >= data.player1Y - data.r and coconut.y <= \n data.player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <=\n data.player1X + data.r):\n 
data.player1Y += data.hitPenalty\n data.coconuts1.remove(coconut)\n if data.Invincible2 == False:\n for coconut in data.coconuts2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= \n data.player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <=\n data.player2X + data.r):\n data.player2Y += data.hitPenalty\n data.coconuts2.remove(coconut)\n\n\nclass PowerUps(Coconuts):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.hourGlass)\n\n\ndef hitPause(data):\n for powerUp in data.powerUps:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.pauseDrops = True\n data.start = data.cy\n data.powerUps.remove(powerUp)\n elif data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.pause1Drop = True\n data.start1 = data.player1Y\n data.powerUps.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.pause2Drop = True\n data.start2 = data.player2Y\n data.powerUps.remove(powerUp)\n\n\nclass Invincible(PowerUps):\n\n def __init__(self, x, y):\n super().__init__(x, y)\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.umbrella)\n\n\ndef hitInvincible(data):\n for powerUp in data.invincible:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if powerUp.y >= data.cy - data.r and powerUp.y <= data.cy + data.r:\n if (powerUp.x >= data.cx - data.r and powerUp.x <= data.cx +\n data.r):\n data.beInvincible = True\n data.start = data.cy\n 
data.invincible.remove(powerUp)\n if data.mode == '2Player' or data.mode == 'AI':\n if (powerUp.y >= data.player1Y - data.r and powerUp.y <= data.\n player1Y + data.r):\n if (powerUp.x >= data.player1X - data.r and powerUp.x <= \n data.player1X + data.r):\n data.Invincible1 = True\n data.start1 = data.player1Y\n data.invincible.remove(powerUp)\n if (powerUp.y >= data.player2Y - data.r and powerUp.y <= data.\n player2Y + data.r):\n if (powerUp.x >= data.player2X - data.r and powerUp.x <= \n data.player2X + data.r):\n data.Invincible2 = True\n data.start2 = data.player2Y\n data.invincible.remove(powerUp)\n\n\nclass ScaryBug(object):\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.speed = 25\n\n def draw(self, canvas, data):\n canvas.create_image(self.x, self.y, image=data.spider)\n\n def onTimerFired(self, data):\n if data.mode == '2Player' or data.mode == 'AI':\n self.speed = 35\n self.y -= self.speed\n if (data.mode == '1Player' or data.mode == 'levelCreated' and data.\n time % 8 == 0):\n side = random.choice(data.sides)\n if side == 'l':\n if self.x - data.lane >= data.Player1Min:\n self.x -= data.lane\n else:\n self.x += data.lane\n elif side == 'r':\n if self.x + data.lane <= data.Player1Max:\n self.x += data.lane\n else:\n self.x -= data.lane\n\n\ndef hitScaryBug(data):\n for bug in data.scaryBug:\n if data.mode == '1Player' or data.mode == 'levelCreated':\n if (bug.y >= data.cy - 1.5 * data.r and bug.y <= data.cy + 1.5 *\n data.r):\n if (bug.x >= data.cx - 1.5 * data.r and bug.x <= data.cx + \n 1.5 * data.r):\n data.hit = True\n data.lives = 0\n data.levelEditorLives = 0\n if data.mode == '2Player' or data.mode == 'AI':\n if (bug.y >= data.player1Y - data.r and bug.y <= data.player1Y +\n data.r):\n if (bug.x >= data.player1X - data.r and bug.x <= data.\n player1X + data.r):\n data.winner = 'player2'\n if (bug.y >= data.player2Y - data.r and bug.y <= data.player2Y +\n data.r):\n if (bug.x >= data.player2X - data.r and bug.x <= data.\n player2X + 
data.r):\n data.winner = 'player1'\n\n\ndef drawPowerups(canvas, data):\n for bug in data.scaryBug:\n bug.draw(canvas, data)\n for powerUp in data.powerUps:\n powerUp.draw(canvas, data)\n for powerUp in data.invincible:\n powerUp.draw(canvas, data)\n\n\ndef drawHome(canvas, data):\n canvas.create_image(data.homeX, data.homeY, image=data.home)\n\n\n<mask token>\n\n\ndef powerUpCoconutShot(data):\n if data.time % 60 == 0 and data.time % 120 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 50 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 100 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef playerRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score,\n font='Arial 18 bold', fill='yellow')\n canvas.create_text(2 * data.width / 3, 660, text=\n \"\"\"The greater the level, the more points get\n added to your score!\"\"\"\n , font='Arial 15 bold', fill='yellow')\n if data.hit == True:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n canvas.create_text(data.width / 2, 280, text='Score: %d' % data.\n score, font='Arial 13 bold', fill='yellow')\n if data.level >= 8:\n madeIt(canvas, data)\n drawHome(canvas, data)\n\n\ndef madeIt(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 70, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,\n font='Arial 15 bold', fill='yellow')\n canvas.create_text(data.width / 2, 375, text=\n 'Congrats! Enter your Name!', font='Arial 15 bold', fill='yellow')\n canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50, \n 450, fill='white')\n canvas.create_text(data.width / 2, 425, text=data.name)\n\n\ndef drop2Player(data):\n if data.winner == None and data.pauseDrops == False:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 25 and abs(xPosition1 - 360) > 25:\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 12 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(140, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.coconuts1.append(Coconuts(344, 0))\n if data.pause2Drop != True:\n data.coconuts2.append(Coconuts(755, 0))\n powerupDrop2Player(data)\n\n\ndef powerupDrop2Player(data):\n if data.time % 45 == 0 and data.time % 90 != 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(140, 0))\n if data.pause2Drop != True:\n 
data.powerUps.append(PowerUps(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.powerUps.append(PowerUps(344, 0))\n if data.pause2Drop != True:\n data.powerUps.append(PowerUps(755, 0))\n if data.time % 60 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(540, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 90 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(540, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(755, 750))\n\n\ndef twoPlayerKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n if data.winner == None:\n if event.keysym == 'a' and data.onLeft1 == False:\n data.onLeft1 = True\n data.player1X = 150\n if event.keysym == 'd' and data.onLeft1 == True:\n data.onLeft1 = False\n data.player1X = 330\n if event.keysym == 'Left' and data.onLeft2 == False:\n data.onLeft2 = True\n data.player2X = 550\n if event.keysym == 'Right' and data.onLeft2 == True:\n data.onLeft2 = False\n data.player2X = 750\n\n\ndef twoPlayerMousePressed(event, data):\n checkHome(event, data)\n\n\ndef twoPlayerTimerFired(data):\n if data.winner == None:\n data.player1Y -= data.speed\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n drop2Player(data)\n data.player2Y -= data.speed\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n drop2Player(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in 
data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n powerupTimerFired(data)\n\n\ndef powerupTimerFired(data):\n for coconut in data.coconuts1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n hit2Player(data)\n for coconut in data.coconuts2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef twoPlayerRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts1:\n coconut.draw(canvas)\n for coconut in data.coconuts2:\n coconut.draw(canvas)\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold', fill\n ='yellow')\n winner(canvas, data)\n drawHome(canvas, data)\n\n\ndef winner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
Player 1', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! Player 2', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef editorKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef editorMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.yourSpeed = 'slow'\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.yourSpeed = 'medium'\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.yourSpeed = 'fast'\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n checkMiddle(event, data)\n checkLast(event, data)\n\n\ndef checkMiddle(event, data):\n if data.medX - data.r <= event.y <= data.medX + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.rainSpeed = 'drizzle'\n data.drizzle = data.click\n data.rain, data.thunderstorm = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.rainSpeed = 'rain'\n data.rain = data.click\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\n if data.hardX 
- 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.rainSpeed = 'thunderstorm'\n data.thunderstorm = data.click\n data.drizzle, data.rain = data.notClick, data.notClick\n\n\n<mask token>\n\n\ndef drawButtons(canvas, data):\n data.font, data.fill = 'Helvetica 13 bold', 'yellow'\n canvas.create_text(data.medX, data.YST, text='Your Speed:', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyX, data.easyY, image=data.slow)\n canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font)\n canvas.create_image(data.medX, data.easyY, image=data.medium)\n canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font)\n canvas.create_image(data.hardX, data.easyY, image=data.fast)\n canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font)\n canvas.create_image(data.easyX, data.medX, image=data.drizzle)\n canvas.create_text(data.medX, data.RST, text='Rain Speed:', font=data.\n font, fill=data.fill)\n canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font)\n canvas.create_image(data.medX, data.medX, image=data.rain)\n canvas.create_text(data.medX, data.medX, text='Rain', font=data.font)\n canvas.create_image(data.hardX, data.medX, image=data.thunderstorm)\n canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font)\n canvas.create_text(data.medX, data.PUT, text='PowerUps?', font=data.\n font, fill=data.fill)\n canvas.create_image(data.easyY, data.last, image=data.yes)\n canvas.create_text(data.easyY, data.last, text='Yes', font=data.font)\n canvas.create_image(data.last, data.last, image=data.no)\n canvas.create_text(data.last, data.last, text='No', font=data.font)\n changeEnter(canvas, data)\n\n\ndef changeEnter(canvas, data):\n if (data.powerUpsEditor != None and data.yourSpeed != None and data.\n rainSpeed != None):\n data.enter = data.click\n canvas.create_image(data.medX, data.enterX, image=data.enter)\n canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)\n\n\ndef 
editorTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef editorRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.S_P - 10, text=\n 'Edit Your Level!', font='Arial 23 bold', fill='yellow')\n drawButtons(canvas, data)\n drawHome(canvas, data)\n\n\ndef setEverything(data):\n if data.yourSpeed == 'slow':\n data.speed = 6\n elif data.yourSpeed == 'medium':\n data.speed = 10\n elif data.yourSpeed == 'fast':\n data.speed = 14\n if data.rainSpeed == 'thunderstorm':\n data.rSpeed = 7\n elif data.rainSpeed == 'rain':\n data.rSpeed = 10\n elif data.rainSpeed == 'drizzle':\n data.rSpeed = 13\n\n\n<mask token>\n\n\ndef levelPowerUp(data):\n if data.powerUpsEditor == True:\n if data.time % 20 == 0 and data.time % 40 != 0:\n Position = random.choice(data.spotList)\n data.powerUps.append(PowerUps(Position, 0))\n if data.time % 30 == 0:\n Position = random.choice(data.spotList)\n data.invincible.append(Invincible(Position, 0))\n if data.time % 35 == 0:\n Position = random.choice(data.spotList)\n data.scaryBug.append(ScaryBug(Position, 750))\n\n\n<mask token>\n\n\ndef levelCreatedMousePressed(event, data):\n checkHome(event, data)\n\n\ndef levelCreatedTimerFired(data):\n setEverything(data)\n if data.levelEditorLives > 0:\n data.cy -= data.speed\n if data.cy < 15:\n data.level += 1\n if data.cy > 40:\n data.time += 1\n if data.pauseDrops != True:\n levelCoconutShot(data)\n if data.powerUpsEditor == False:\n for coconut in data.coconuts:\n coconut.onTimerFired(data)\n hit(data)\n if data.powerUpsEditor == True:\n for powerUp in data.powerUps:\n 
powerUp.onTimerFired(data)\n hitPause(data)\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n for coconut in data.coconuts:\n if data.pauseDrops == False:\n coconut.onTimerFired(data)\n if data.beInvincible == False:\n hit(data)\n if data.start != None:\n if abs(data.start - data.cy) >= 120:\n data.pauseDrops, data.beInvincible = False, False\n\n\ndef levelCreatedRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconuts:\n coconut.draw(canvas)\n if data.powerUpsEditor == True:\n drawPowerups(canvas, data)\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\n canvas.create_text(data.width / 6, 100, text='Total Lives: %d' % data.\n levelEditorLives, font='Arial 20 bold', fill='yellow')\n canvas.create_text(data.width / 2, 660, text=\n \"\"\"You lose a life for hitting a drop\n & don't get eaten!\"\"\",\n font='Arial 15 bold', fill='yellow')\n if data.levelEditorLives <= 0:\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n deadScreen)\n canvas.create_text(data.width / 2, data.height / 4, text=\n 'You Lose! 
Better Luck Next Time!', font='Helvetica 23 bold',\n fill='yellow')\n if data.level > 1:\n winEditor(canvas, data)\n drawHome(canvas, data)\n\n\ndef winEditor(canvas, data):\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='You Made it!', font=\n 'Arial 23 bold', fill='yellow')\n\n\n<mask token>\n\n\ndef difficultyMousePressed(event, data):\n checkHome(event, data)\n if data.easyY - data.r <= event.y <= data.easyY + data.r:\n if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:\n data.difficulty = data.difS\n data.slow = data.click\n data.medium, data.fast = data.notClick, data.notClick\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.difficulty = data.difM\n data.medium = data.click\n data.slow, data.fast = data.notClick, data.notClick\n if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:\n data.difficulty = data.difH\n data.fast = data.click\n data.slow, data.medium = data.notClick, data.notClick\n if data.enter == data.click:\n if data.enterY - data.r <= event.y <= data.enterY + data.r:\n if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:\n data.mode = 'AI'\n\n\ndef difficultyTimerFired(data):\n data.editorTime += 1\n if data.editorTime % 2 == 0:\n rainDrop(data)\n for drop in data.editorDrops:\n drop.onTimerFired(data)\n\n\ndef rainDrop(data):\n xPosition = random.randint(0, data.width)\n data.editorDrops.append(Coconuts(xPosition, 0))\n\n\ndef difficultyRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawDifficulties(canvas, data)\n drawHome(canvas, data)\n\n\ndef hitAI1(data, distance):\n for coconut in data.coconutsAI1:\n if 
(data.player1Y - data.r - coconut.y <= distance and data.\n switchOnProgress == False):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r or AISwitchBug(data, distance) == True):\n testInt = random.randint(0, 9)\n if testInt <= data.difficulty:\n data.switchOnProgress = True\n if data.player1X == 150:\n data.player1X = 340\n else:\n data.player1X = 150\n data.switchOnProgress = False\n if (coconut.y >= data.player1Y - data.r and coconut.y <= data.\n player1Y + data.r):\n if (coconut.x >= data.player1X - data.r and coconut.x <= data.\n player1X + data.r):\n data.player1Y += 50\n data.coconutsAI1.remove(coconut)\n\n\ndef AISwitchBug(data, distance):\n for scaryBug in data.scaryBug:\n if (data.player1Y - data.r - scaryBug.y <= distance and data.\n switchOnProgress == False):\n if (scaryBug.x >= data.player1X - data.r and scaryBug.x <= data\n .player1X + data.r):\n return True\n\n\ndef hitAI2(data, distance):\n for coconut in data.coconutsAI2:\n if (coconut.y >= data.player2Y - data.r and coconut.y <= data.\n player2Y + data.r):\n if (coconut.x >= data.player2X - data.r and coconut.x <= data.\n player2X + data.r):\n data.player2Y += 50\n data.coconutsAI2.remove(coconut)\n\n\ndef coconutShotAI(data):\n if data.winner == None:\n if data.time % 15 == 0:\n xPosition1 = random.randint(0, 385)\n if abs(xPosition1 - 100) > 40 and abs(xPosition1 - 360) > 40:\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition1, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition1 + 410, 0))\n if data.time % 8 == 0:\n xPosition2 = random.randint(0, 80)\n xPosition3 = random.randint(364, 385)\n if data.pause1Drop != True:\n data.coconutsAI1.append(Coconuts(xPosition2, 0))\n data.coconutsAI1.append(Coconuts(xPosition3, 0))\n if data.pause2Drop != True:\n data.coconutsAI2.append(Coconuts(xPosition2 + 410, 0))\n data.coconutsAI2.append(Coconuts(xPosition3 + 410, 0))\n addExtraCoconut(data)\n 
addPowerUpsAI(data)\n\n\n<mask token>\n\n\ndef addPowerUpsAI(data):\n if data.time % 33 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(140, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(550, 0))\n elif side == 'r':\n if data.pause1Drop != True:\n data.invincible.append(Invincible(344, 0))\n if data.pause2Drop != True:\n data.invincible.append(Invincible(755, 0))\n if data.time % 66 == 0:\n side = random.choice(data.sides)\n if side == 'l':\n data.scaryBug.append(ScaryBug(140, 750))\n data.scaryBug.append(ScaryBug(550, 750))\n elif side == 'r':\n data.scaryBug.append(ScaryBug(344, 750))\n data.scaryBug.append(ScaryBug(750, 750))\n\n\n<mask token>\n\n\ndef AITimerFired(data):\n if data.winner == None:\n if data.Invincible1 == False:\n hitAI1(data, 31)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 31)\n for coconut in data.coconutsAI1:\n if data.pause1Drop == False:\n coconut.onTimerFired(data)\n for coconut in data.coconutsAI2:\n if data.pause2Drop == False:\n coconut.onTimerFired(data)\n if data.Invincible1 == False:\n hitAI1(data, 13)\n if data.Invincible2 == True:\n pass\n elif data.Invincible2 == False:\n hitAI2(data, 13)\n data.player1Y -= data.speedAI\n if data.player1Y < 15 and data.player2Y > 15:\n data.winner = 'player1'\n if data.player1Y > 40:\n data.time += 1\n coconutShotAI(data)\n data.player2Y -= data.speedAI\n if data.player2Y < 15 and data.player1Y > 15:\n data.winner = 'player2'\n if data.player2Y > 40:\n data.time += 1\n coconutShotAI(data)\n if data.player1Y < 15 and data.player2Y < 15:\n data.winner = 'tie'\n for powerUp in data.powerUps:\n powerUp.onTimerFired(data)\n hitPause(data)\n powerUpAITimerFired(data)\n\n\ndef powerUpAITimerFired(data):\n for powerUp in data.invincible:\n powerUp.onTimerFired(data)\n hitInvincible(data)\n for bug in data.scaryBug:\n bug.onTimerFired(data)\n hitScaryBug(data)\n if 
data.start1 != None:\n if abs(data.start1 - data.player1Y) >= 120:\n data.pause1Drop = False\n data.Invincible1 = False\n if data.start2 != None:\n if abs(data.start2 - data.player2Y) >= 120:\n data.pause2Drop = False\n data.Invincible2 = False\n\n\ndef AIRedrawAll(canvas, data):\n canvas.create_image(data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_image(3 * data.width / 4, data.height / 2, image=data.\n halfBackground)\n canvas.create_line(data.width / 2, 0, data.width / 2, data.height, width=10\n )\n canvas.create_line(0, 20, data.width, 20)\n for coconut in data.coconutsAI1:\n coconut.draw(canvas)\n for coconut in data.coconutsAI2:\n coconut.draw(canvas)\n canvas.create_text(50, 40, text='Computer', font='Arial 15 bold', fill=\n 'yellow')\n canvas.create_text(450, 40, text='Player 1', font='Arial 15 bold', fill\n ='yellow')\n drawPowerups(canvas, data)\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\n AIWinner(canvas, data)\n drawHome(canvas, data)\n\n\ndef AIWinner(canvas, data):\n if data.winner == 'player1':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text='The Computer Won :(',\n font='Arial 23 bold', fill='yellow')\n elif data.winner == 'player2':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'You Made it! 
You Won!', font='Arial 23 bold', fill='yellow')\n elif data.winner == 'tie':\n canvas.create_rectangle(0, 0, data.width, data.height, fill='black')\n canvas.create_image(data.width / 2, data.height / 2, image=data.\n winScreen)\n canvas.create_image(300, 320, image=data.winBug)\n canvas.create_text(data.width / 2, 100, text=\n 'Tie! You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\ndef helpKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\n<mask token>\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n 
mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\ndef init(data):\n data.score = 0\n data.mode = 'splashScreen'\n data.timerDelay = 100\n data.height = 800\n data.width = 800\n data.speed = 10\n data.speedAI = 12\n data.speedAI2 = 12\n data.switchOnProgress = False\n data.r = 25\n data.cx = 280\n data.cy = 750\n data.onLeft1, data.onLeft2 = True, True\n data.win = False\n data.coconuts = []\n data.powerUps = []\n data.coconuts1 = []\n data.coconuts2 = []\n data.coconutsAI1 = []\n data.coconutsAI2 = []\n data.invincible = []\n data.pauseDrops = False\n data.pause1Drop = False\n data.pause2Drop = False\n init1(data)\n\n\ndef init1(data):\n data.beInvincible = False\n data.Invincible1 = False\n data.Invincible2 = False\n data.scaryBug = []\n data.time = 0\n data.coconutFall = False\n data.sides = ['r', 'l']\n data.level = 1\n data.splashScreenTime = 0\n data.splashScreenDrops = []\n data.background = PhotoImage(file='tree.gif')\n data.deadScreen = PhotoImage(file='deadBug.gif')\n data.ladyBug = PhotoImage(file='lady.gif')\n data.winScreen = PhotoImage(file='treeTop1.gif')\n data.winBug = PhotoImage(file='littleBug.gif')\n data.halfBackground = PhotoImage(file='halfTree.gif')\n data.umbrella = 
PhotoImage(file='umbrella2.gif')\n data.spider = PhotoImage(file='spider.gif')\n data.hourGlass = PhotoImage(file='hourGlass.gif')\n data.splashScreen = PhotoImage(file='splash.gif')\n init2(data)\n\n\ndef init2(data):\n data.tbg = PhotoImage(file='tbg2.gif')\n data.click = PhotoImage(file='click.gif')\n data.notClick = PhotoImage(file='notClick.gif')\n data.player1X = 150\n data.player1Y = 750\n data.player2X = 550\n data.player2Y = 750\n data.winner = None\n data.speed = 12\n data.speed2 = 12\n data.editorTime = 0\n data.editorDrops = []\n data.margin = 100\n data.enter = False\n data.powerUpsEditor = None\n data.yourSpeed = None\n data.rainSpeed = None\n data.slow = data.notClick\n data.medium = data.notClick\n data.fast = data.notClick\n data.drizzle = data.notClick\n data.rain = data.notClick\n data.thunderstorm = data.notClick\n init3(data)\n\n\ndef init3(data):\n data.yes = data.notClick\n data.no = data.notClick\n data.enter = data.notClick\n data.levelEditorLives = 2\n data.rSpeed = None\n data.start = None\n data.start1 = None\n data.start2 = None\n data.difficulty = None\n data.mode1 = data.notClick\n data.mode2 = data.notClick\n data.mode3 = data.notClick\n data.mode4 = data.notClick\n data.mode5 = data.notClick\n data.mode6 = data.notClick\n data.home = PhotoImage(file='home.gif')\n data.helpScreen = PhotoImage(file='help1.gif')\n data.title = PhotoImage(file='title.gif')\n data.scoreList = []\n data.spotList = [270, 364, 458, 552, 646, 740]\n data.savedScores = readFile('score.txt')\n if data.mode == 'levelCreated':\n setEverything(data)\n initsplashScreenNumbers(data)\n\n\ndef initsplashScreenNumbers(data):\n data.splashButtonY = 425\n data.p1ButtonX = 225\n data.p2ButtonX = 290\n data.edButton = 355\n data.diffButton = 425\n data.helpButton = 490\n data.sboardButton = 555\n data.hitPenalty = 75\n data.splashText = data.height / 2 - 20\n data.lives = 2\n data.levelMax = 8\n data.lane = 94\n data.Player1Min = 270\n data.Player1Max = 740\n data.homeX = 
50\n data.homeY = 650\n initScoreBoardHelp(data)\n init1Player(data)\n\n\ndef initScoreBoardHelp(data):\n data.tbgY = 5 * data.height / 12\n data.txtTScore = 150\n data.S_P = 220\n data.numScores = 5\n data.scorePos = data.height / 10\n data.scoreShift = 270\n data.helpY = data.height / 2 - 20\n data.name = ''\n data.printName = ''\n data.hit = False\n initAI(data)\n\n\ndef init1Player(data):\n data.buffer = 40\n\n\ndef initAI(data):\n data.AITY = 225\n data.easyX = 200\n data.easyY = 300\n data.medX = 400\n data.hardX = 600\n data.enterY = 450\n data.difS = 4\n data.difM = 6\n data.difH = 8\n data.last = 500\n data.enterX = 575\n data.PUT = 450\n data.RST = 350\n data.YST = 250\n\n\ndef mousePressed(event, data):\n if data.mode == 'splashScreen':\n splashScreenMousePressed(event, data)\n elif data.mode == '1Player':\n playerMousePressed(event, data)\n elif data.mode == '2Player':\n twoPlayerMousePressed(event, data)\n elif data.mode == 'editor':\n editorMousePressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedMousePressed(event, data)\n elif data.mode == 'AI':\n AIMousePressed(event, data)\n elif data.mode == 'difficulty':\n difficultyMousePressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardMousePressed(event, data)\n elif data.mode == 'help':\n helpMousePressed(event, data)\n\n\ndef keyPressed(event, data):\n if data.mode == 'splashScreen':\n splashKeyPressed(event, data)\n elif data.mode == '1Player':\n playerKeyPressed(event, data)\n elif data.mode == '2Player':\n twoPlayerKeyPressed(event, data)\n elif data.mode == 'editor':\n editorKeyPressed(event, data)\n elif data.mode == 'levelCreated':\n levelCreatedKeyPressed(event, data)\n elif data.mode == 'AI':\n AIKeyPressed(event, data)\n elif data.mode == 'difficulty':\n difficultyKeyPressed(event, data)\n elif data.mode == 'scoreboard':\n scoreboardKeyPressed(event, data)\n elif data.mode == 'help':\n helpKeyPressed(event, data)\n\n\ndef timerFired(data):\n if data.mode == 
'splashScreen':\n splashScreenTimerFired(data)\n elif data.mode == '1Player':\n playerTimerFired(data)\n elif data.mode == '2Player':\n twoPlayerTimerFired(data)\n elif data.mode == 'editor':\n editorTimerFired(data)\n elif data.mode == 'levelCreated':\n levelCreatedTimerFired(data)\n elif data.mode == 'AI':\n AITimerFired(data)\n elif data.mode == 'difficulty':\n difficultyTimerFired(data)\n elif data.mode == 'scoreboard':\n scoreboardTimerFired(data)\n elif data.mode == 'help':\n helpTimerFired(data)\n\n\ndef redrawAll(canvas, data):\n if data.mode == 'splashScreen':\n splashScreenRedrawAll(canvas, data)\n elif data.mode == '1Player':\n playerRedrawAll(canvas, data)\n elif data.mode == '2Player':\n twoPlayerRedrawAll(canvas, data)\n elif data.mode == 'editor':\n editorRedrawAll(canvas, data)\n elif data.mode == 'levelCreated':\n levelCreatedRedrawAll(canvas, data)\n elif data.mode == 'AI':\n AIRedrawAll(canvas, data)\n elif data.mode == 'difficulty':\n difficultyRedrawAll(canvas, data)\n elif data.mode == 'scoreboard':\n scoreboardRedrawAll(canvas, data)\n elif data.mode == 'help':\n helpRedrawAll(canvas, data)\n\n\ndef splashScreenMousePressed(event, data):\n if (data.splashButtonY - 2 * data.r <= event.x <= data.splashButtonY + \n 2 * data.r):\n if data.p1ButtonX - data.r <= event.y <= data.p1ButtonX + data.r:\n data.mode = '1Player'\n if data.p2ButtonX - data.r <= event.y <= data.p2ButtonX + data.r:\n data.mode = '2Player'\n if data.edButton - data.r <= event.y <= data.edButton + data.r:\n data.mode = 'editor'\n if data.diffButton - data.r <= event.y <= data.diffButton + data.r:\n data.mode = 'difficulty'\n if data.helpButton - data.r <= event.y <= data.helpButton + data.r:\n data.mode = 'help'\n if data.sboardButton - data.r <= event.y <= data.sboardButton + data.r:\n data.mode = 'scoreboard'\n\n\ndef splashKeyPressed(event, data):\n pass\n\n\ndef splashScreenTimerFired(data):\n data.splashScreenTime += 1\n if data.splashScreenTime % 2 == 1:\n 
rainDropSplash(data)\n for drop in data.splashScreenDrops:\n drop.onTimerFired(data)\n\n\ndef splashScreenButtons(canvas, data):\n canvas.create_image(data.splashButtonY, data.p1ButtonX, image=data.mode1)\n canvas.create_image(data.splashButtonY, data.p2ButtonX, image=data.mode2)\n canvas.create_image(data.splashButtonY, data.edButton, image=data.mode3)\n canvas.create_image(data.splashButtonY, data.diffButton, image=data.mode4)\n canvas.create_image(data.splashButtonY, data.helpButton, image=data.mode5)\n canvas.create_image(data.splashButtonY, data.sboardButton, image=data.mode6\n )\n\n\ndef rainDropSplash(data):\n xPosition = random.randint(0, 800)\n data.splashScreenDrops.append(Coconuts(xPosition, 0))\n\n\ndef splashScreenRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.splashText - 10, image=data.title)\n for drop in data.splashScreenDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.splashText, text=\n \"\"\"\n 1.) Single Player Level Mode\n\n\n 2.) Two-Player Mode\n\n \n 3.) Level Creator Practice Mode\n\n \n 4.) Play Against the Computer\n\n \n 5.) Help and Instructions\n\n \n 6.) 
def writeFile(path, contents):
    """Overwrite the text file at *path* with *contents*."""
    with open(path, 'wt') as f:
        f.write(contents)


def readFile(path):
    """Return the entire text contents of the file at *path*."""
    with open(path, 'rt') as f:
        return f.read()


class Coconuts(object):
    """A falling raindrop hazard, drawn as a blue diamond."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.r = 9
        self.fill = 'deep sky blue'
        self.speed = 30
        self.outline = 'blue'

    def draw(self, canvas):
        # Diamond vertices: top, left, bottom, right around (x, y).
        top = (self.x, self.y - 2 * self.r)
        left = (self.x - self.r, self.y)
        bottom = (self.x, self.y + self.r)
        right = (self.x + self.r, self.y)
        canvas.create_polygon(*top, *left, *bottom, *right,
                              fill=self.fill, outline=self.outline, width=3)

    def onTimerFired(self, data):
        self.y += self.speed


def hit(data):
    """Apply drop collisions to the single-player bug (1Player/editor)."""
    if data.mode not in ('1Player', 'levelCreated'):
        return
    # NOTE(review): the list is mutated while iterating, as in the original.
    for drop in data.coconuts:
        withinY = data.cy - data.r <= drop.y <= data.cy + data.r
        withinX = data.cx - data.r <= drop.x <= data.cx + data.r
        if withinY and withinX:
            data.cy += data.hitPenalty
            if data.mode == 'levelCreated':
                data.lives -= 1
            elif data.hit == False and data.level < data.levelMax:
                data.score -= data.level
            data.coconuts.remove(drop)
            if data.mode == 'levelCreated':
                data.levelEditorLives -= 1


def hit2Player(data):
    """Apply drop collisions to both players in two-player mode."""
    if data.mode != '2Player':
        return
    if data.Invincible1 == False:
        for drop in data.coconuts1:
            if (data.player1Y - data.r <= drop.y <= data.player1Y + data.r
                    and data.player1X - data.r <= drop.x
                    <= data.player1X + data.r):
                data.player1Y += data.hitPenalty
                data.coconuts1.remove(drop)
    if data.Invincible2 == False:
        for drop in data.coconuts2:
            if (data.player2Y - data.r <= drop.y <= data.player2Y + data.r
                    and data.player2X - data.r <= drop.x
                    <= data.player2X + data.r):
                data.player2Y += data.hitPenalty
                data.coconuts2.remove(drop)


class PowerUps(Coconuts):
    """Hourglass power-up; falls like a drop but is drawn from an image."""

    def __init__(self, x, y):
        super().__init__(x, y)

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.hourGlass)
def hitPause(data):
    """Activate the hourglass (pause-drops) power-up on pickup."""
    def near(px, py, x, y):
        # True when (px, py) lies inside the square pickup zone around (x, y).
        return (y - data.r <= py <= y + data.r
                and x - data.r <= px <= x + data.r)

    for powerUp in data.powerUps:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if near(powerUp.x, powerUp.y, data.cx, data.cy):
                data.pauseDrops = True
                data.start = data.cy
                data.powerUps.remove(powerUp)
        elif data.mode == '2Player' or data.mode == 'AI':
            if near(powerUp.x, powerUp.y, data.player1X, data.player1Y):
                data.pause1Drop = True
                data.start1 = data.player1Y
                data.powerUps.remove(powerUp)
            if near(powerUp.x, powerUp.y, data.player2X, data.player2Y):
                data.pause2Drop = True
                data.start2 = data.player2Y
                data.powerUps.remove(powerUp)


class Invincible(PowerUps):
    """Umbrella power-up granting temporary invincibility."""

    def __init__(self, x, y):
        super().__init__(x, y)

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.umbrella)


def hitInvincible(data):
    """Activate the umbrella (invincibility) power-up on pickup."""
    for powerUp in data.invincible:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if (data.cy - data.r <= powerUp.y <= data.cy + data.r
                    and data.cx - data.r <= powerUp.x <= data.cx + data.r):
                data.beInvincible = True
                data.start = data.cy
                data.invincible.remove(powerUp)
        if data.mode == '2Player' or data.mode == 'AI':
            if (data.player1Y - data.r <= powerUp.y <= data.player1Y + data.r
                    and data.player1X - data.r <= powerUp.x
                    <= data.player1X + data.r):
                data.Invincible1 = True
                data.start1 = data.player1Y
                data.invincible.remove(powerUp)
            if (data.player2Y - data.r <= powerUp.y <= data.player2Y + data.r
                    and data.player2X - data.r <= powerUp.x
                    <= data.player2X + data.r):
                data.Invincible2 = True
                data.start2 = data.player2Y
                data.invincible.remove(powerUp)


class ScaryBug(object):
    """Spider hazard that climbs upward and shifts between lanes."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.speed = 25

    def draw(self, canvas, data):
        canvas.create_image(self.x, self.y, image=data.spider)

    def onTimerFired(self, data):
        if data.mode == '2Player' or data.mode == 'AI':
            self.speed = 35
        self.y -= self.speed
        # NOTE(review): precedence here is `a or (b and c)` — the time gate
        # only applies to 'levelCreated'; '1Player' always shifts. Confirm
        # this is intended before "fixing" it.
        if (data.mode == '1Player' or data.mode == 'levelCreated' and
                data.time % 8 == 0):
            side = random.choice(data.sides)
            if side == 'l':
                if self.x - data.lane >= data.Player1Min:
                    self.x -= data.lane
                else:
                    self.x += data.lane
            elif side == 'r':
                if self.x + data.lane <= data.Player1Max:
                    self.x += data.lane
                else:
                    self.x -= data.lane


def hitScaryBug(data):
    """Spider collisions: fatal in solo modes, decide winner in 2P/AI."""
    for bug in data.scaryBug:
        if data.mode == '1Player' or data.mode == 'levelCreated':
            if (data.cy - 1.5 * data.r <= bug.y <= data.cy + 1.5 * data.r and
                    data.cx - 1.5 * data.r <= bug.x
                    <= data.cx + 1.5 * data.r):
                data.hit = True
                data.lives = 0
                data.levelEditorLives = 0
        if data.mode == '2Player' or data.mode == 'AI':
            if (data.player1Y - data.r <= bug.y <= data.player1Y + data.r and
                    data.player1X - data.r <= bug.x
                    <= data.player1X + data.r):
                data.winner = 'player2'
            if (data.player2Y - data.r <= bug.y <= data.player2Y + data.r and
                    data.player2X - data.r <= bug.x
                    <= data.player2X + data.r):
                data.winner = 'player1'


def drawPowerups(canvas, data):
    """Draw every spider, hourglass, and umbrella currently on screen."""
    for bug in data.scaryBug:
        bug.draw(canvas, data)
    for powerUp in data.powerUps:
        powerUp.draw(canvas, data)
    for powerUp in data.invincible:
        powerUp.draw(canvas, data)


def drawHome(canvas, data):
    """Draw the home button."""
    canvas.create_image(data.homeX, data.homeY, image=data.home)


def checkHome(event, data):
    """Reset to the splash screen when the home button is clicked."""
    if data.homeY - data.r <= event.y <= data.homeY + data.r:
        if data.homeX - data.r <= event.x <= data.homeX + data.r:
            init(data)
def coconutShot(data):
    """Spawn single-player raindrops on level-scaled schedules."""
    if data.level > 0 and data.pauseDrops == False:
        # Higher levels spawn side/center triples more often.
        if (data.time % int(data.levelMax / data.level) == 0
                or data.time % 6 == 0):
            leftX = random.randint(0, data.Player1Min - data.buffer)
            rightX = random.randint(data.Player1Max + data.buffer,
                                    data.width + data.buffer)
            data.coconuts.append(Coconuts(leftX, 0))
            data.coconuts.append(Coconuts(rightX, 0))
            midX = random.randint(data.Player1Min - data.buffer,
                                  data.Player1Max + data.buffer)
            data.coconuts.append(Coconuts(midX, 0))
        if data.time % 5 == 0:
            extraX = random.randint(0, data.Player1Min - data.buffer)
            data.coconuts.append(Coconuts(extraX, 0))
        if data.time % int(24 / data.level) == 0:
            side = random.choice(data.sides)
            if side == 'l':
                data.coconuts.append(Coconuts(data.Player1Min, 0))
            elif side == 'r':
                data.coconuts.append(Coconuts(data.Player1Max, 0))
        powerUpCoconutShot(data)


def powerUpCoconutShot(data):
    """Spawn hourglasses, umbrellas, and spiders on fixed timers."""
    if data.time % 60 == 0 and data.time % 120 != 0:
        spot = random.choice(data.spotList)
        data.powerUps.append(PowerUps(spot, 0))
    if data.time % 50 == 0:
        spot = random.choice(data.spotList)
        data.invincible.append(Invincible(spot, 0))
    if data.time % 100 == 0:
        spot = random.choice(data.spotList)
        data.scaryBug.append(ScaryBug(spot, 750))


def playerKeyPressed(event, data):
    """Handle restart, lane movement, and win-screen name entry."""
    if data.level < data.levelMax and event.keysym == 'r':
        init(data)
    if event.keysym == 'Left' and data.cx >= data.Player1Min + data.lane / 2:
        data.cx -= data.lane / 2
    elif event.keysym == 'Right' and data.cx <= data.Player1Max:
        data.cx += data.lane / 2
    if data.level >= data.levelMax:
        # Win screen: type a name, edit it, and submit to the scoreboard.
        if len(event.keysym) == 1:
            if len(data.name) < 15:
                data.name += event.keysym
        if event.keysym == 'BackSpace':
            data.name = data.name[0:-1]
        if event.keysym == 'Return':
            data.scoreList += data.score, data.name
            writeFile('score.txt', data.savedScores + str(data.score) + ',' +
                      data.name + '\n')
            data.mode = 'scoreboard'


def playerRedrawAll(canvas, data):
    """Draw the single-player scene: drops, bug, HUD, lose/win screens."""
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_line(0, 20, data.width, 20)
    for coconut in data.coconuts:
        coconut.draw(canvas)
    drawPowerups(canvas, data)
    canvas.create_image(data.cx, data.cy, image=data.ladyBug)
    canvas.create_text(data.width / 6, 50, text='Level: %d' % data.level,
                       font='Arial 18 bold', fill='yellow')
    canvas.create_text(data.width / 6, 80, text='Score: %d' % data.score,
                       font='Arial 18 bold', fill='yellow')
    canvas.create_text(2 * data.width / 3, 660, text=
        """The greater the level, the more points get
 added to your score!"""
        , font='Arial 15 bold', fill='yellow')
    if data.hit == True:
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2,
                            image=data.deadScreen)
        canvas.create_text(data.width / 2, data.height / 4, text=
            'You Lose! Better Luck Next Time!', font='Helvetica 23 bold',
            fill='yellow')
        canvas.create_text(data.width / 2, 280, text='Score: %d' % data.score,
                           font='Arial 13 bold', fill='yellow')
    if data.level >= 8:
        madeIt(canvas, data)
    drawHome(canvas, data)


def madeIt(canvas, data):
    """Draw the single-player win screen with the name-entry box."""
    canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
    canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width / 2, 70, text='You Made it!',
                       font='Arial 23 bold', fill='yellow')
    canvas.create_text(data.width / 2, 100, text='Score: %d' % data.score,
                       font='Arial 15 bold', fill='yellow')
    canvas.create_text(data.width / 2, 375, text='Congrats! Enter your Name!',
                       font='Arial 15 bold', fill='yellow')
    canvas.create_rectangle(data.width / 2 - 50, 400, data.width / 2 + 50,
                            450, fill='white')
    canvas.create_text(data.width / 2, 425, text=data.name)


def drop2Player(data):
    """Spawn mirrored raindrops into both halves in two-player mode."""
    if data.winner == None and data.pauseDrops == False:
        if data.time % 15 == 0:
            dropX = random.randint(0, 385)
            # Keep random drops away from the two fixed lanes.
            if abs(dropX - 100) > 25 and abs(dropX - 360) > 25:
                if data.pause1Drop != True:
                    data.coconuts1.append(Coconuts(dropX, 0))
                if data.pause2Drop != True:
                    data.coconuts2.append(Coconuts(dropX + 410, 0))
        if data.time % 12 == 0:
            side = random.choice(data.sides)
            x1, x2 = (140, 540) if side == 'l' else (344, 755)
            if data.pause1Drop != True:
                data.coconuts1.append(Coconuts(x1, 0))
            if data.pause2Drop != True:
                data.coconuts2.append(Coconuts(x2, 0))
        powerupDrop2Player(data)


def powerupDrop2Player(data):
    """Periodically drop power-ups and spiders into both halves."""
    if data.time % 45 == 0 and data.time % 90 != 0:
        side = random.choice(data.sides)
        x1, x2 = (140, 540) if side == 'l' else (344, 755)
        if data.pause1Drop != True:
            data.powerUps.append(PowerUps(x1, 0))
        if data.pause2Drop != True:
            data.powerUps.append(PowerUps(x2, 0))
    if data.time % 60 == 0:
        side = random.choice(data.sides)
        x1, x2 = (140, 540) if side == 'l' else (344, 755)
        if data.pause1Drop != True:
            data.invincible.append(Invincible(x1, 0))
        if data.pause2Drop != True:
            data.invincible.append(Invincible(x2, 0))
    if data.time % 90 == 0:
        side = random.choice(data.sides)
        x1, x2 = (140, 540) if side == 'l' else (344, 755)
        data.scaryBug.append(ScaryBug(x1, 750))
        data.scaryBug.append(ScaryBug(x2, 750))
def twoPlayerKeyPressed(event, data):
    """Handle restart plus lane switches: 'a'/'d' for P1, arrows for P2."""
    if event.keysym == 'r':
        init(data)
    if data.winner != None:
        return
    key = event.keysym
    if key == 'a' and data.onLeft1 == False:
        data.onLeft1, data.player1X = True, 150
    if key == 'd' and data.onLeft1 == True:
        data.onLeft1, data.player1X = False, 330
    if key == 'Left' and data.onLeft2 == False:
        data.onLeft2, data.player2X = True, 550
    if key == 'Right' and data.onLeft2 == True:
        data.onLeft2, data.player2X = False, 750


def twoPlayerMousePressed(event, data):
    """Only the home button is clickable during a two-player game."""
    checkHome(event, data)


def twoPlayerTimerFired(data):
    """Advance both climbers, spawn drops, and resolve the race."""
    if data.winner != None:
        return
    data.player1Y -= data.speed
    if data.player1Y < 15 and data.player2Y > 15:
        data.winner = 'player1'
    if data.player1Y > 40:
        data.time += 1
        drop2Player(data)
    data.player2Y -= data.speed
    if data.player2Y < 15 and data.player1Y > 15:
        data.winner = 'player2'
    if data.player2Y > 40:
        data.time += 1
        drop2Player(data)
    if data.player1Y < 15 and data.player2Y < 15:
        data.winner = 'tie'
    for powerUp in data.powerUps:
        powerUp.onTimerFired(data)
    hitPause(data)
    for powerUp in data.invincible:
        powerUp.onTimerFired(data)
    hitInvincible(data)
    for bug in data.scaryBug:
        bug.onTimerFired(data)
    hitScaryBug(data)
    powerupTimerFired(data)


def powerupTimerFired(data):
    """Advance both players' drops and expire timed power-up effects."""
    for coconut in data.coconuts1:
        if data.pause1Drop == False:
            coconut.onTimerFired(data)
    hit2Player(data)
    for coconut in data.coconuts2:
        if data.pause2Drop == False:
            coconut.onTimerFired(data)
    # A power-up wears off once the player has climbed 120 px past pickup.
    if data.start1 != None:
        if abs(data.start1 - data.player1Y) >= 120:
            data.pause1Drop = False
            data.Invincible1 = False
    if data.start2 != None:
        if abs(data.start2 - data.player2Y) >= 120:
            data.pause2Drop = False
            data.Invincible2 = False


def twoPlayerRedrawAll(canvas, data):
    """Draw the split-screen two-player scene and any winner overlay."""
    canvas.create_image(data.width / 4, data.height / 2,
                        image=data.halfBackground)
    canvas.create_image(3 * data.width / 4, data.height / 2,
                        image=data.halfBackground)
    canvas.create_line(data.width / 2, 0, data.width / 2, data.height,
                       width=10)
    canvas.create_line(0, 20, data.width, 20)
    for coconut in data.coconuts1:
        coconut.draw(canvas)
    for coconut in data.coconuts2:
        coconut.draw(canvas)
    drawPowerups(canvas, data)
    canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)
    canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)
    canvas.create_text(50, 40, text='Player 1', font='Arial 15 bold',
                       fill='yellow')
    canvas.create_text(450, 40, text='Player 2', font='Arial 15 bold',
                       fill='yellow')
    winner(canvas, data)
    drawHome(canvas, data)


def winner(canvas, data):
    """Overlay the win screen once a two-player result is decided."""
    messages = {'player1': 'You Made it! Player 1',
                'player2': 'You Made it! Player 2',
                'tie': 'Tie! You Both Made it!'}
    if data.winner in messages:
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2,
                            image=data.winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=messages[data.winner],
                           font='Arial 23 bold', fill='yellow')
def editorKeyPressed(event, data):
    """'r' restarts the whole game from the editor screen."""
    if event.keysym == 'r':
        init(data)


def editorMousePressed(event, data):
    """Handle clicks on the editor's player-speed row (and delegate rest)."""
    checkHome(event, data)
    if data.easyY - data.r <= event.y <= data.easyY + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.yourSpeed = 'slow'
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.yourSpeed = 'medium'
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.yourSpeed = 'fast'
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    checkMiddle(event, data)
    checkLast(event, data)


def checkMiddle(event, data):
    """Handle clicks on the editor's rain-speed row."""
    if data.medX - data.r <= event.y <= data.medX + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.rainSpeed = 'drizzle'
            data.drizzle = data.click
            data.rain, data.thunderstorm = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.rainSpeed = 'rain'
            data.rain = data.click
            data.drizzle, data.thunderstorm = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.rainSpeed = 'thunderstorm'
            data.thunderstorm = data.click
            data.drizzle, data.rain = data.notClick, data.notClick


def checkLast(event, data):
    """Handle the power-ups yes/no row and the enter button."""
    if data.last - data.r <= event.y <= data.last + data.r:
        if data.easyY - 2 * data.r <= event.x <= data.easyY + 2 * data.r:
            data.powerUpsEditor = True
            data.yes, data.no = data.click, data.notClick
        if data.last - 2 * data.r <= event.x <= data.last + 2 * data.r:
            data.powerUpsEditor = False
            data.no, data.yes = data.click, data.notClick
    if data.enter == data.click:
        if data.enterX - data.r <= event.y <= data.enterX + data.r:
            if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
                data.mode = 'levelCreated'


def drawButtons(canvas, data):
    """Draw the editor's three option rows plus the enter button."""
    data.font, data.fill = 'Helvetica 13 bold', 'yellow'
    canvas.create_text(data.medX, data.YST, text='Your Speed:',
                       font=data.font, fill=data.fill)
    canvas.create_image(data.easyX, data.easyY, image=data.slow)
    canvas.create_text(data.easyX, data.easyY, text='Slow', font=data.font)
    canvas.create_image(data.medX, data.easyY, image=data.medium)
    canvas.create_text(data.medX, data.easyY, text='Medium', font=data.font)
    canvas.create_image(data.hardX, data.easyY, image=data.fast)
    canvas.create_text(data.hardX, data.easyY, text='Fast', font=data.font)
    canvas.create_image(data.easyX, data.medX, image=data.drizzle)
    canvas.create_text(data.medX, data.RST, text='Rain Speed:',
                       font=data.font, fill=data.fill)
    canvas.create_text(data.easyX, data.medX, text='Drizzle', font=data.font)
    canvas.create_image(data.medX, data.medX, image=data.rain)
    canvas.create_text(data.medX, data.medX, text='Rain', font=data.font)
    canvas.create_image(data.hardX, data.medX, image=data.thunderstorm)
    canvas.create_text(data.hardX, data.medX, text='Heavy', font=data.font)
    canvas.create_text(data.medX, data.PUT, text='PowerUps?',
                       font=data.font, fill=data.fill)
    canvas.create_image(data.easyY, data.last, image=data.yes)
    canvas.create_text(data.easyY, data.last, text='Yes', font=data.font)
    canvas.create_image(data.last, data.last, image=data.no)
    canvas.create_text(data.last, data.last, text='No', font=data.font)
    changeEnter(canvas, data)


def changeEnter(canvas, data):
    """Light up the enter button once all three choices are made."""
    if (data.powerUpsEditor != None and data.yourSpeed != None and
            data.rainSpeed != None):
        data.enter = data.click
    canvas.create_image(data.medX, data.enterX, image=data.enter)
    canvas.create_text(data.medX, data.enterX, text='Enter', font=data.font)


def editorTimerFired(data):
    """Animate the decorative rain behind the editor screen."""
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for drop in data.editorDrops:
        drop.onTimerFired(data)
def rainDrop(data):
    """Add one decorative raindrop at a random x along the top edge."""
    xPosition = random.randint(0, data.width)
    data.editorDrops.append(Coconuts(xPosition, 0))


def editorRedrawAll(canvas, data):
    """Draw the editor screen: background, rain, title, option buttons."""
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    canvas.create_text(data.width / 2, data.S_P - 10, text=
        'Edit Your Level!', font='Arial 23 bold', fill='yellow')
    drawButtons(canvas, data)
    drawHome(canvas, data)


def setEverything(data):
    """Translate the editor's choices into numeric climb/drop speeds."""
    playerSpeeds = {'slow': 6, 'medium': 10, 'fast': 14}
    if data.yourSpeed in playerSpeeds:
        data.speed = playerSpeeds[data.yourSpeed]
    # Smaller rSpeed means more frequent drops.
    dropSpeeds = {'thunderstorm': 7, 'rain': 10, 'drizzle': 13}
    if data.rainSpeed in dropSpeeds:
        data.rSpeed = dropSpeeds[data.rainSpeed]


def levelPowerUp(data):
    """Spawn power-ups/spiders in the custom level when enabled."""
    if data.powerUpsEditor == True:
        if data.time % 20 == 0 and data.time % 40 != 0:
            spot = random.choice(data.spotList)
            data.powerUps.append(PowerUps(spot, 0))
        if data.time % 30 == 0:
            spot = random.choice(data.spotList)
            data.invincible.append(Invincible(spot, 0))
        if data.time % 35 == 0:
            spot = random.choice(data.spotList)
            data.scaryBug.append(ScaryBug(spot, 750))


def levelCreatedMousePressed(event, data):
    """Only the home button is clickable during a custom level."""
    checkHome(event, data)


def levelCreatedTimerFired(data):
    """Advance the custom level: climb, spawn, collide, expire power-ups."""
    setEverything(data)
    if data.levelEditorLives > 0:
        data.cy -= data.speed
        if data.cy < 15:
            data.level += 1
        if data.cy > 40:
            data.time += 1
            if data.pauseDrops != True:
                levelCoconutShot(data)
        if data.powerUpsEditor == False:
            for coconut in data.coconuts:
                coconut.onTimerFired(data)
            hit(data)
        if data.powerUpsEditor == True:
            for powerUp in data.powerUps:
                powerUp.onTimerFired(data)
            hitPause(data)
            for powerUp in data.invincible:
                powerUp.onTimerFired(data)
            hitInvincible(data)
            for bug in data.scaryBug:
                bug.onTimerFired(data)
            hitScaryBug(data)
            for coconut in data.coconuts:
                if data.pauseDrops == False:
                    coconut.onTimerFired(data)
            if data.beInvincible == False:
                hit(data)
            if data.start != None:
                if abs(data.start - data.cy) >= 120:
                    data.pauseDrops, data.beInvincible = False, False


def levelCreatedRedrawAll(canvas, data):
    """Draw the custom-level scene, lives HUD, and lose/win overlays."""
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_line(0, 20, data.width, 20)
    for coconut in data.coconuts:
        coconut.draw(canvas)
    if data.powerUpsEditor == True:
        drawPowerups(canvas, data)
    canvas.create_image(data.cx, data.cy, image=data.ladyBug)
    canvas.create_text(data.width / 6, 100, text='Total Lives: %d' %
                       data.levelEditorLives, font='Arial 20 bold',
                       fill='yellow')
    canvas.create_text(data.width / 2, 660, text=
        """You lose a life for hitting a drop
 & don't get eaten!""",
        font='Arial 15 bold', fill='yellow')
    if data.levelEditorLives <= 0:
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2,
                            image=data.deadScreen)
        canvas.create_text(data.width / 2, data.height / 4, text=
            'You Lose! Better Luck Next Time!', font='Helvetica 23 bold',
            fill='yellow')
    if data.level > 1:
        winEditor(canvas, data)
    drawHome(canvas, data)


def winEditor(canvas, data):
    """Overlay the win screen for a completed custom level."""
    canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
    canvas.create_image(data.width / 2, data.height / 2, image=data.winScreen)
    canvas.create_image(300, 320, image=data.winBug)
    canvas.create_text(data.width / 2, 100, text='You Made it!', font=
        'Arial 23 bold', fill='yellow')


def drawDifficulties(canvas, data):
    """Draw the computer-difficulty buttons and (when ready) enter."""
    canvas.create_text(data.medX, data.AITY, text='Computer Difficulty:',
                       font='Arial 23 bold', fill='yellow')
    canvas.create_image(data.easyX, data.easyY, image=data.slow)
    canvas.create_text(data.easyX, data.easyY, text='Easy')
    canvas.create_image(data.medX, data.easyY, image=data.medium)
    canvas.create_text(data.medX, data.easyY, text='Medium')
    canvas.create_image(data.hardX, data.easyY, image=data.fast)
    canvas.create_text(data.hardX, data.easyY, text='Hard')
    if data.difficulty != None:
        data.enter = data.click
    canvas.create_image(data.medX, data.enterY, image=data.enter)
    canvas.create_text(data.medX, data.enterY, text='Enter')


def difficultyMousePressed(event, data):
    """Pick a computer difficulty, then start AI mode via enter."""
    checkHome(event, data)
    if data.easyY - data.r <= event.y <= data.easyY + data.r:
        if data.easyX - 2 * data.r <= event.x <= data.easyX + 2 * data.r:
            data.difficulty = data.difS
            data.slow = data.click
            data.medium, data.fast = data.notClick, data.notClick
        if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
            data.difficulty = data.difM
            data.medium = data.click
            data.slow, data.fast = data.notClick, data.notClick
        if data.hardX - 2 * data.r <= event.x <= data.hardX + 2 * data.r:
            data.difficulty = data.difH
            data.fast = data.click
            data.slow, data.medium = data.notClick, data.notClick
    if data.enter == data.click:
        if data.enterY - data.r <= event.y <= data.enterY + data.r:
            if data.medX - 2 * data.r <= event.x <= data.medX + 2 * data.r:
                data.mode = 'AI'
def difficultyTimerFired(data):
    """Animate the decorative rain behind the difficulty screen."""
    data.editorTime += 1
    if data.editorTime % 2 == 0:
        rainDrop(data)
    for drop in data.editorDrops:
        drop.onTimerFired(data)


def rainDrop(data):
    # Kept as in the original file (it redefines rainDrop identically here).
    xPosition = random.randint(0, data.width)
    data.editorDrops.append(Coconuts(xPosition, 0))


def difficultyRedrawAll(canvas, data):
    """Draw the difficulty-selection screen."""
    canvas.create_image(data.width / 2, data.height / 2, image=data.background)
    canvas.create_image(data.width / 2, data.height / 2, image=data.tbg)
    for drop in data.editorDrops:
        drop.draw(canvas)
    drawDifficulties(canvas, data)
    drawHome(canvas, data)


def hitAI1(data, distance):
    """Let the computer dodge incoming drops, then apply real hits."""
    for coconut in data.coconutsAI1:
        # Dodge: when a drop is within `distance` above the bug, switch
        # lanes with probability scaled by data.difficulty.
        if (data.player1Y - data.r - coconut.y <= distance and
                data.switchOnProgress == False):
            if (coconut.x >= data.player1X - data.r and coconut.x <=
                    data.player1X + data.r or
                    AISwitchBug(data, distance) == True):
                testInt = random.randint(0, 9)
                if testInt <= data.difficulty:
                    data.switchOnProgress = True
                    if data.player1X == 150:
                        data.player1X = 340
                    else:
                        data.player1X = 150
                    data.switchOnProgress = False
        if (coconut.y >= data.player1Y - data.r and coconut.y <=
                data.player1Y + data.r):
            if (coconut.x >= data.player1X - data.r and coconut.x <=
                    data.player1X + data.r):
                data.player1Y += 50
                data.coconutsAI1.remove(coconut)


def AISwitchBug(data, distance):
    """Return True when a spider is close enough that the AI should dodge."""
    for scaryBug in data.scaryBug:
        if (data.player1Y - data.r - scaryBug.y <= distance and
                data.switchOnProgress == False):
            if (scaryBug.x >= data.player1X - data.r and
                    scaryBug.x <= data.player1X + data.r):
                return True


def hitAI2(data, distance):
    """Apply drop hits to the human player in AI mode."""
    for coconut in data.coconutsAI2:
        if (coconut.y >= data.player2Y - data.r and coconut.y <=
                data.player2Y + data.r):
            if (coconut.x >= data.player2X - data.r and coconut.x <=
                    data.player2X + data.r):
                data.player2Y += 50
                data.coconutsAI2.remove(coconut)


def coconutShotAI(data):
    """Spawn mirrored raindrops for the AI race."""
    if data.winner == None:
        if data.time % 15 == 0:
            dropX = random.randint(0, 385)
            if abs(dropX - 100) > 40 and abs(dropX - 360) > 40:
                if data.pause1Drop != True:
                    data.coconutsAI1.append(Coconuts(dropX, 0))
                if data.pause2Drop != True:
                    data.coconutsAI2.append(Coconuts(dropX + 410, 0))
        if data.time % 8 == 0:
            edgeL = random.randint(0, 80)
            edgeR = random.randint(364, 385)
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(edgeL, 0))
                data.coconutsAI1.append(Coconuts(edgeR, 0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(edgeL + 410, 0))
                data.coconutsAI2.append(Coconuts(edgeR + 410, 0))
        addExtraCoconut(data)
        addPowerUpsAI(data)


def addExtraCoconut(data):
    """Drop lane-targeted coconuts and hourglasses on fixed timers."""
    if data.time % 18 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(140, 0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(540, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.coconutsAI1.append(Coconuts(344, 0))
            if data.pause2Drop != True:
                data.coconutsAI2.append(Coconuts(755, 0))
    if data.time % 37 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(140, 0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(550, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.powerUps.append(PowerUps(344, 0))
            if data.pause2Drop != True:
                data.powerUps.append(PowerUps(755, 0))


def addPowerUpsAI(data):
    """Drop umbrellas and spiders into both halves on fixed timers."""
    if data.time % 33 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(140, 0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(550, 0))
        elif side == 'r':
            if data.pause1Drop != True:
                data.invincible.append(Invincible(344, 0))
            if data.pause2Drop != True:
                data.invincible.append(Invincible(755, 0))
    if data.time % 66 == 0:
        side = random.choice(data.sides)
        if side == 'l':
            data.scaryBug.append(ScaryBug(140, 750))
            data.scaryBug.append(ScaryBug(550, 750))
        elif side == 'r':
            data.scaryBug.append(ScaryBug(344, 750))
            data.scaryBug.append(ScaryBug(750, 750))


def AIKeyPressed(event, data):
    """Handle restart and the human player's lane switches in AI mode."""
    if event.keysym == 'r':
        init(data)
    if data.winner == None:
        if event.keysym == 'Left' and data.onLeft1 == False:
            data.onLeft1 = True
            data.player2X = 550
        elif event.keysym == 'Right' and data.onLeft1 == True:
            data.onLeft1 = False
            data.player2X = 750


def AIMousePressed(event, data):
    """Only the home button is clickable in AI mode."""
    checkHome(event, data)


def AITimerFired(data):
    """Advance the AI race: dodge, move drops, climb, and resolve."""
    if data.winner != None:
        return
    if data.Invincible1 == False:
        hitAI1(data, 31)
    if data.Invincible2 == True:
        pass
    elif data.Invincible2 == False:
        hitAI2(data, 31)
    for coconut in data.coconutsAI1:
        if data.pause1Drop == False:
            coconut.onTimerFired(data)
    for coconut in data.coconutsAI2:
        if data.pause2Drop == False:
            coconut.onTimerFired(data)
    if data.Invincible1 == False:
        hitAI1(data, 13)
    if data.Invincible2 == True:
        pass
    elif data.Invincible2 == False:
        hitAI2(data, 13)
    data.player1Y -= data.speedAI
    if data.player1Y < 15 and data.player2Y > 15:
        data.winner = 'player1'
    if data.player1Y > 40:
        data.time += 1
        coconutShotAI(data)
    data.player2Y -= data.speedAI
    if data.player2Y < 15 and data.player1Y > 15:
        data.winner = 'player2'
    if data.player2Y > 40:
        data.time += 1
        coconutShotAI(data)
    if data.player1Y < 15 and data.player2Y < 15:
        data.winner = 'tie'
    for powerUp in data.powerUps:
        powerUp.onTimerFired(data)
    hitPause(data)
    powerUpAITimerFired(data)


def powerUpAITimerFired(data):
    """Advance power-ups/spiders and expire timed effects in AI mode."""
    for powerUp in data.invincible:
        powerUp.onTimerFired(data)
    hitInvincible(data)
    for bug in data.scaryBug:
        bug.onTimerFired(data)
    hitScaryBug(data)
    if data.start1 != None:
        if abs(data.start1 - data.player1Y) >= 120:
            data.pause1Drop = False
            data.Invincible1 = False
    if data.start2 != None:
        if abs(data.start2 - data.player2Y) >= 120:
            data.pause2Drop = False
            data.Invincible2 = False


def AIRedrawAll(canvas, data):
    """Draw the split-screen AI race and any result overlay."""
    canvas.create_image(data.width / 4, data.height / 2,
                        image=data.halfBackground)
    canvas.create_image(3 * data.width / 4, data.height / 2,
                        image=data.halfBackground)
    canvas.create_line(data.width / 2, 0, data.width / 2, data.height,
                       width=10)
    canvas.create_line(0, 20, data.width, 20)
    for coconut in data.coconutsAI1:
        coconut.draw(canvas)
    for coconut in data.coconutsAI2:
        coconut.draw(canvas)
    canvas.create_text(50, 40, text='Computer', font='Arial 15 bold',
                       fill='yellow')
    canvas.create_text(450, 40, text='Player 1', font='Arial 15 bold',
                       fill='yellow')
    drawPowerups(canvas, data)
    canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)
    canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)
    AIWinner(canvas, data)
    drawHome(canvas, data)


def AIWinner(canvas, data):
    """Overlay the result screen once the AI race is decided."""
    messages = {'player1': 'The Computer Won :(',
                'player2': 'You Made it! You Won!',
                'tie': 'Tie! You Both Made it!'}
    if data.winner in messages:
        canvas.create_rectangle(0, 0, data.width, data.height, fill='black')
        canvas.create_image(data.width / 2, data.height / 2,
                            image=data.winScreen)
        canvas.create_image(300, 320, image=data.winBug)
        canvas.create_text(data.width / 2, 100, text=messages[data.winner],
                           font='Arial 23 bold', fill='yellow')
You Both Made it!', font='Arial 23 bold', fill='yellow')\n\n\ndef scoreboardKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\ndef scoreboardMousePressed(event, data):\n checkHome(event, data)\n\n\ndef scoreboardTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef scoreboardRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.height / 2, image=data.background)\n canvas.create_image(data.width / 2, data.tbgY, image=data.tbg)\n for drop in data.editorDrops:\n drop.draw(canvas)\n canvas.create_text(data.width / 2, data.txtTScore, text='Top Scores!',\n font='Arial 30 bold', fill='yellow')\n canvas.create_text(data.width / 2, data.S_P, text='Score_Player', font=\n 'Arial 20 bold', fill='yellow')\n drawHome(canvas, data)\n data.savedScores\n data.savedScores = readFile('score.txt')\n score = data.savedScores.splitlines()\n scores = []\n for line in score:\n scores.append(line.split(','))\n scores = sorted(scores, key=lambda x: int(x[0]))\n top5 = scores[-data.numScores:]\n top5.reverse()\n for i in range(len(top5)):\n canvas.create_text(data.width / 2, data.scoreShift + i * 50, text=\n top5[i], font='Arial 18 bold', fill='yellow')\n\n\ndef helpKeyPressed(event, data):\n if event.keysym == 'r':\n init(data)\n\n\n<mask token>\n\n\ndef helpTimerFired(data):\n difficultyTimerFired(data)\n\n\ndef helpRedrawAll(canvas, data):\n canvas.create_image(data.width / 2, data.helpY, image=data.helpScreen)\n for drop in data.editorDrops:\n drop.draw(canvas)\n drawHome(canvas, data)\n\n\ndef run(width=15000, height=25000):\n\n def redrawAllWrapper(canvas, data):\n canvas.delete(ALL)\n redrawAll(canvas, data)\n canvas.update()\n\n def mousePressedWrapper(event, canvas, data):\n mousePressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def keyPressedWrapper(event, canvas, data):\n keyPressed(event, data)\n redrawAllWrapper(canvas, data)\n\n def timerFiredWrapper(canvas, data):\n timerFired(data)\n redrawAllWrapper(canvas, data)\n 
canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\n\n\n class Struct(object):\n pass\n data = Struct()\n data.width = width\n data.height = height\n data.timerDelay = 100\n root = Tk()\n init(data)\n canvas = Canvas(root, width=data.width, height=data.height)\n canvas.pack()\n root.bind('<Button-1>', lambda event: mousePressedWrapper(event, canvas,\n data))\n root.bind('<Key>', lambda event: keyPressedWrapper(event, canvas, data))\n timerFiredWrapper(canvas, data)\n root.mainloop()\n print('bye!')\n\n\n<mask token>\n", "step-5": "#Arushi Patel (aruship)\r\nfrom tkinter import *\r\nimport random\r\n\r\n######################################\r\n#images taken from wikipedia,pixabay,\r\n#trans americas, clipartpanda,pngimg,\r\n#findicons, microsoft word\r\n######################################\r\n\r\n####################################\r\n# init\r\n####################################\r\ndef init(data):\r\n data.score =0\r\n data.mode = \"splashScreen\"\r\n data.timerDelay = 100\r\n data.height = 800\r\n data.width = 800\r\n data.speed = 10\r\n data.speedAI = 12\r\n data.speedAI2 = 12\r\n data.switchOnProgress = False\r\n data.r = 25\r\n data.cx= 280\r\n data.cy=750\r\n data.onLeft1, data.onLeft2 = True, True\r\n data.win= False\r\n data.coconuts = []\r\n data.powerUps = []\r\n data.coconuts1 = []\r\n data.coconuts2 = []\r\n data.coconutsAI1 =[]\r\n data.coconutsAI2 = []\r\n data.invincible = []\r\n data.pauseDrops = False\r\n data.pause1Drop = False\r\n data.pause2Drop = False\r\n init1(data)\r\n\r\ndef init1(data):\r\n data.beInvincible = False\r\n data.Invincible1 = False\r\n data.Invincible2 = False\r\n data.scaryBug = []\r\n data.time = 0\r\n data.coconutFall = False\r\n data.sides = [\"r\", \"l\"]\r\n data.level = 1\r\n data.splashScreenTime = 0\r\n data.splashScreenDrops = []\r\n data.background= PhotoImage(file=\"tree.gif\")\r\n data.deadScreen = PhotoImage(file = \"deadBug.gif\")\r\n data.ladyBug = PhotoImage(file = \"lady.gif\")\r\n 
data.winScreen= PhotoImage(file = \"treeTop1.gif\")\r\n data.winBug = PhotoImage(file = \"littleBug.gif\")\r\n data.halfBackground = PhotoImage(file = \"halfTree.gif\")\r\n data.umbrella = PhotoImage(file = \"umbrella2.gif\")\r\n data.spider = PhotoImage(file = \"spider.gif\")\r\n data.hourGlass = PhotoImage(file = \"hourGlass.gif\")\r\n data.splashScreen = PhotoImage(file = \"splash.gif\")\r\n init2(data)\r\n\r\ndef init2(data):\r\n data.tbg= PhotoImage(file = \"tbg2.gif\")\r\n data.click = PhotoImage(file = \"click.gif\")\r\n data.notClick = PhotoImage(file = \"notClick.gif\")\r\n data.player1X = 150\r\n data.player1Y = 750\r\n data.player2X = 550\r\n data.player2Y = 750\r\n data.winner = None\r\n data.speed = 12\r\n data.speed2 = 12\r\n data.editorTime = 0\r\n data.editorDrops = []\r\n data.margin = 100\r\n data.enter = False\r\n data.powerUpsEditor = None\r\n data.yourSpeed = None\r\n data.rainSpeed = None\r\n data.slow= data.notClick\r\n data.medium = data.notClick\r\n data.fast = data.notClick\r\n data.drizzle = data.notClick\r\n data.rain =data.notClick\r\n data.thunderstorm = data.notClick\r\n init3(data)\r\n\r\ndef init3(data):\r\n data.yes = data.notClick\r\n data.no = data.notClick\r\n data.enter = data.notClick\r\n data.levelEditorLives =2\r\n data.rSpeed = None\r\n data.start = None\r\n data.start1 = None\r\n data.start2 = None\r\n data.difficulty = None\r\n data.mode1 = data.notClick\r\n data.mode2 = data.notClick\r\n data.mode3 = data.notClick\r\n data.mode4 = data.notClick\r\n data.mode5 = data.notClick\r\n data.mode6 = data.notClick\r\n data.home = PhotoImage(file = \"home.gif\")\r\n data.helpScreen = PhotoImage(file = \"help1.gif\")\r\n data.title = PhotoImage(file = \"title.gif\")\r\n data.scoreList = []\r\n data.spotList = [270,364,458,552, 646, 740]\r\n data.savedScores = readFile(\"score.txt\")\r\n if data.mode == \"levelCreated\":\r\n setEverything(data)\r\n initsplashScreenNumbers(data)\r\n\r\ndef initsplashScreenNumbers(data):\r\n 
data.splashButtonY = 425\r\n data.p1ButtonX= 225\r\n data.p2ButtonX = 290\r\n data.edButton = 355\r\n data.diffButton = 425\r\n data.helpButton = 490\r\n data.sboardButton = 555\r\n data.hitPenalty = 75\r\n data.splashText = data.height/2-20\r\n data.lives = 2\r\n data.levelMax = 8\r\n data.lane = 94\r\n data.Player1Min= 270\r\n data.Player1Max = 740\r\n data.homeX =50\r\n data.homeY = 650\r\n initScoreBoardHelp(data)\r\n init1Player(data)\r\n\r\ndef initScoreBoardHelp(data):\r\n data.tbgY=5*data.height/12\r\n data.txtTScore = 150\r\n data.S_P = 220\r\n data.numScores = 5\r\n data.scorePos = data.height/10\r\n data.scoreShift = 270\r\n data.helpY = data.height/2-20\r\n data.name = \"\"\r\n data.printName = \"\"\r\n data.hit = False\r\n initAI(data)\r\n\r\ndef init1Player(data):\r\n data.buffer = 40\r\n\r\ndef initAI(data):\r\n data.AITY = 225\r\n data.easyX = 200\r\n data.easyY = 300\r\n data.medX =400\r\n data.hardX = 600\r\n data.enterY = 450\r\n data.difS = 4\r\n data.difM = 6\r\n data.difH = 8\r\n data.last = 500\r\n data.enterX = 575\r\n data.PUT = 450\r\n data.RST = 350\r\n data.YST = 250\r\n####################################\r\n# mode dispatcher\r\n####################################\r\n\r\ndef mousePressed(event, data):\r\n if (data.mode == \"splashScreen\"): splashScreenMousePressed(event, data)\r\n elif (data.mode == \"1Player\"): playerMousePressed(event, data)\r\n elif (data.mode == \"2Player\"): twoPlayerMousePressed(event, data)\r\n elif (data.mode == \"editor\"): editorMousePressed(event,data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedMousePressed(event,data)\r\n elif (data.mode == \"AI\"): AIMousePressed(event, data)\r\n elif (data.mode == \"difficulty\"): difficultyMousePressed(event, data)\r\n elif (data.mode == \"scoreboard\"): scoreboardMousePressed(event, data)\r\n elif (data.mode == \"help\"): helpMousePressed(event, data)\r\n\r\ndef keyPressed(event, data):\r\n if (data.mode == \"splashScreen\"): splashKeyPressed(event, 
data)\r\n elif (data.mode == \"1Player\"):playerKeyPressed(event, data)\r\n elif (data.mode == \"2Player\"):twoPlayerKeyPressed(event, data)\r\n elif (data.mode == \"editor\"): editorKeyPressed(event, data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedKeyPressed(event,data)\r\n elif (data.mode == \"AI\"): AIKeyPressed(event, data)\r\n elif (data.mode == \"difficulty\"): difficultyKeyPressed(event, data)\r\n elif (data.mode == \"scoreboard\"): scoreboardKeyPressed(event, data)\r\n elif (data.mode == \"help\"): helpKeyPressed(event, data)\r\n \r\ndef timerFired(data):\r\n if (data.mode == \"splashScreen\"): splashScreenTimerFired(data)\r\n elif (data.mode == \"1Player\"):playerTimerFired(data)\r\n elif (data.mode == \"2Player\"):twoPlayerTimerFired(data)\r\n elif (data.mode == \"editor\"): editorTimerFired(data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedTimerFired(data)\r\n elif (data.mode == \"AI\"): AITimerFired(data)\r\n elif (data.mode == \"difficulty\"): difficultyTimerFired(data)\r\n elif (data.mode == \"scoreboard\"): scoreboardTimerFired(data)\r\n elif (data.mode == \"help\"): helpTimerFired(data)\r\n\r\ndef redrawAll(canvas, data):\r\n if (data.mode == \"splashScreen\"): splashScreenRedrawAll(canvas, data)\r\n elif (data.mode == \"1Player\"):playerRedrawAll(canvas, data)\r\n elif (data.mode == \"2Player\"):twoPlayerRedrawAll(canvas, data)\r\n elif (data.mode == \"editor\"): editorRedrawAll(canvas, data)\r\n elif (data.mode == \"levelCreated\"): levelCreatedRedrawAll(canvas,data)\r\n elif (data.mode == \"AI\"): AIRedrawAll(canvas, data)\r\n elif (data.mode == \"difficulty\"): difficultyRedrawAll(canvas, data)\r\n elif (data.mode == \"scoreboard\"): scoreboardRedrawAll(canvas, data)\r\n elif (data.mode == \"help\"): helpRedrawAll(canvas, data)\r\n\r\n####################################\r\n# splashScreen mode\r\n####################################\r\ndef splashScreenMousePressed(event, data):\r\n #checks for selection of mode\r\n if 
data.splashButtonY-2*data.r <= event.x <=data.splashButtonY+2*data.r:\r\n if data.p1ButtonX-data.r<=event.y<=data.p1ButtonX+data.r:\r\n data.mode = \"1Player\"\r\n if data.p2ButtonX-data.r<=event.y<=data.p2ButtonX+data.r:\r\n data.mode = \"2Player\"\r\n if data.edButton-data.r<=event.y<=data.edButton+data.r:\r\n data.mode = \"editor\"\r\n if data.diffButton-data.r<=event.y<=data.diffButton+data.r:\r\n data.mode = \"difficulty\"\r\n if data.helpButton-data.r<=event.y<=data.helpButton+data.r:\r\n data.mode = \"help\"\r\n if data.sboardButton-data.r<=event.y<=data.sboardButton+data.r:\r\n data.mode = \"scoreboard\"\r\n\r\ndef splashKeyPressed(event, data):\r\n pass\r\n\r\n\r\ndef splashScreenTimerFired(data):\r\n data.splashScreenTime += 1\r\n if data.splashScreenTime %2 ==1:\r\n rainDropSplash(data)\r\n for drop in data.splashScreenDrops:\r\n drop.onTimerFired(data)\r\n\r\ndef splashScreenButtons(canvas, data):\r\n canvas.create_image(data.splashButtonY,data.p1ButtonX,image = data.mode1)\r\n canvas.create_image(data.splashButtonY,data.p2ButtonX,image = data.mode2)\r\n canvas.create_image(data.splashButtonY,data.edButton,image = data.mode3)\r\n canvas.create_image(data.splashButtonY,data.diffButton,image = data.mode4)\r\n canvas.create_image(data.splashButtonY,data.helpButton,image = data.mode5)\r\n canvas.create_image(data.splashButtonY,data.sboardButton,image =data.mode6)\r\n \r\ndef rainDropSplash(data):\r\n xPosition = random.randint(0,800)\r\n data.splashScreenDrops.append(Coconuts(xPosition,0))\r\n\r\ndef splashScreenRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.splashText-10, image=data.title)\r\n for drop in data.splashScreenDrops: drop.draw(canvas)\r\n canvas.create_text(data.width/2, data.splashText, text=\"\"\"\r\n 1.) Single Player Level Mode\r\n\r\n\r\n 2.) Two-Player Mode\r\n\r\n \r\n 3.) Level Creator Practice Mode\r\n\r\n \r\n 4.) Play Against the Computer\r\n\r\n \r\n 5.) Help and Instructions\r\n\r\n \r\n 6.) 
Scoreboard\r\n\r\n \r\n \"\"\", font=\"Arial 14 bold\", fill = \"yellow\")\r\n splashScreenButtons(canvas, data)\r\n\r\n####################################\r\n# taken from class notes\r\n####################################\r\n\r\ndef writeFile(path, contents):\r\n with open(path, \"wt\") as f:\r\n f.write(contents)\r\n\r\ndef readFile(path):\r\n with open(path, \"rt\") as f:\r\n return f.read()\r\n\r\n####################################\r\n# 1Player mode\r\n####################################\r\n\r\n\r\n#Coconuts (from Mario game) represent the water drops\r\nclass Coconuts(object):\r\n def __init__(self,x,y):\r\n self.x = x\r\n self.y = y\r\n self.r = 9\r\n self.fill = \"deep sky blue\"\r\n self.speed = 30\r\n self.outline= \"blue\"\r\n\r\n def draw(self, canvas):\r\n canvas.create_polygon(self.x,self.y- 2*self.r,\r\n self.x-self.r, self.y,\r\n self.x, self.y + self.r,\r\n self.x+self.r, self.y, fill = self.fill,\r\n outline = self.outline, width = 3)\r\n\r\n def onTimerFired(self, data):\r\n # downward falling motion\r\n self.y += self.speed\r\n \r\ndef hit(data):\r\n #checks for hitting rain\r\n for coconut in data.coconuts:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if coconut.y>=data.cy-data.r and coconut.y<=data.cy+data.r:\r\n if coconut.x>=data.cx-data.r and coconut.x<=data.cx+data.r:\r\n data.cy+=data.hitPenalty\r\n if data.mode == \"levelCreated\":\r\n data.lives-=1\r\n elif data.hit ==False and data.level<data.levelMax:\r\n data.score -=data.level\r\n data.coconuts.remove(coconut)\r\n if data.mode == \"levelCreated\":\r\n data.levelEditorLives-=1\r\n\r\n \r\ndef hit2Player(data):\r\n if data.mode == \"2Player\":\r\n if data.Invincible1 == False:\r\n #only when powerup isn't active\r\n for coconut in data.coconuts1:\r\n if coconut.y>=data.player1Y-data.r \\\r\n and coconut.y<=data.player1Y+data.r:\r\n if coconut.x>=data.player1X-data.r and \\\r\n coconut.x<=data.player1X+data.r:\r\n data.player1Y+=data.hitPenalty \r\n 
data.coconuts1.remove(coconut)\r\n if data.Invincible2 == False:\r\n #only when powerup isn't active\r\n for coconut in data.coconuts2:\r\n if coconut.y>=data.player2Y-data.r and \\\r\n coconut.y<=data.player2Y+data.r:\r\n if coconut.x>=data.player2X-data.r and \\\r\n coconut.x<=data.player2X+data.r:\r\n data.player2Y+=data.hitPenalty \r\n data.coconuts2.remove(coconut)\r\n\r\n\r\nclass PowerUps(Coconuts):\r\n def __init__(self,x,y):\r\n super().__init__(x, y)\r\n\r\n def draw(self, canvas, data):\r\n canvas.create_image(self.x, self.y, image=data.hourGlass)\r\n \r\ndef hitPause(data):\r\n # checks if hits hour-glass & pauses with flag\r\n for powerUp in data.powerUps:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r:\r\n if powerUp.x>=data.cx-data.r and powerUp.x<=data.cx+data.r:\r\n data.pauseDrops = True\r\n data.start = data.cy\r\n data.powerUps.remove(powerUp)\r\n elif data.mode == \"2Player\" or data.mode == \"AI\":\r\n if powerUp.y>=data.player1Y-data.r and \\\r\n powerUp.y<=data.player1Y+data.r:\r\n if powerUp.x>=data.player1X-data.r and \\\r\n powerUp.x<=data.player1X+data.r:\r\n data.pause1Drop = True\r\n data.start1 = data.player1Y\r\n data.powerUps.remove(powerUp)\r\n if powerUp.y>=data.player2Y-data.r and \\\r\n powerUp.y<=data.player2Y+data.r:\r\n if powerUp.x>=data.player2X-data.r and \\\r\n powerUp.x<=data.player2X+data.r:\r\n data.pause2Drop = True\r\n data.start2 = data.player2Y\r\n data.powerUps.remove(powerUp)\r\n \r\n\r\nclass Invincible(PowerUps):\r\n def __init__(self,x,y):\r\n super().__init__(x, y)\r\n \r\n def draw(self, canvas, data):\r\n canvas.create_image(self.x, self.y, image=data.umbrella)\r\n\r\ndef hitInvincible(data):\r\n #checks if hits umbrella powerup\r\n for powerUp in data.invincible:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if powerUp.y>=data.cy-data.r and powerUp.y<=data.cy+data.r:\r\n if powerUp.x>=data.cx-data.r 
and powerUp.x<=data.cx+data.r:\r\n data.beInvincible = True\r\n data.start = data.cy\r\n data.invincible.remove(powerUp)\r\n if data.mode == \"2Player\" or data.mode == \"AI\":\r\n #for player1\r\n if powerUp.y>=data.player1Y-data.r and \\\r\n powerUp.y<=data.player1Y+data.r:\r\n if powerUp.x>=data.player1X-data.r and \\\r\n powerUp.x<=data.player1X+data.r:\r\n data.Invincible1=True\r\n data.start1 = data.player1Y\r\n data.invincible.remove(powerUp)\r\n # for player 2\r\n if powerUp.y>=data.player2Y-data.r and \\\r\n powerUp.y<=data.player2Y+data.r:\r\n if powerUp.x>=data.player2X-data.r and \\\r\n powerUp.x<=data.player2X+data.r:\r\n data.Invincible2=True\r\n data.start2 = data.player2Y\r\n data.invincible.remove(powerUp)\r\n \r\nclass ScaryBug(object):\r\n def __init__(self,x,y):\r\n self.x = x\r\n self.y = y\r\n self.speed = 25\r\n\r\n def draw(self, canvas, data):\r\n canvas.create_image(self.x, self.y, image=data.spider)\r\n\r\n def onTimerFired(self, data):\r\n if data.mode ==\"2Player\" or data.mode == \"AI\":\r\n self.speed = 35\r\n self.y -= self.speed\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\" and\\\r\n data.time %8 ==0:\r\n #makes spider dynamically move\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if self.x -data.lane >=data.Player1Min:self.x-=data.lane\r\n else: self.x+=data.lane\r\n elif side == \"r\":\r\n if self.x+data.lane<= data.Player1Max:self.x +=data.lane\r\n else: self.x -=data.lane\r\n \r\n \r\n \r\ndef hitScaryBug(data):\r\n # checks for automatic death by spider\r\n for bug in data.scaryBug:\r\n if data.mode == \"1Player\" or data.mode == \"levelCreated\":\r\n if bug.y>=data.cy-1.5*data.r and bug.y<=data.cy+1.5*data.r:\r\n if bug.x>=data.cx-1.5*data.r and bug.x<=data.cx+1.5*data.r:\r\n data.hit = True\r\n data.lives = 0\r\n data.levelEditorLives = 0\r\n if data.mode == \"2Player\" or data.mode == \"AI\":\r\n if bug.y>=data.player1Y-data.r and bug.y<=data.player1Y+data.r:\r\n if 
bug.x>=data.player1X-data.r and bug.x<=data.player1X+data.r:\r\n data.winner= \"player2\"\r\n if bug.y>=data.player2Y-data.r and bug.y<=data.player2Y+data.r:\r\n if bug.x>=data.player2X-data.r and bug.x<=data.player2X+data.r:\r\n data.winner= \"player1\"\r\n\r\ndef drawPowerups(canvas, data):\r\n for bug in data.scaryBug:\r\n bug.draw(canvas, data)\r\n for powerUp in data.powerUps:\r\n powerUp.draw(canvas, data)\r\n for powerUp in data.invincible:\r\n powerUp.draw(canvas, data)\r\n\r\ndef drawHome(canvas, data):\r\n #home button in every screen\r\n canvas.create_image(data.homeX,data.homeY, image= data.home)\r\n\r\ndef checkHome(event, data):\r\n if data.homeY-data.r<= event.y <= data.homeY +data.r:\r\n if data.homeX-data.r<= event.x<=data.homeX+ data.r:\r\n init(data)\r\n \r\ndef coconutShot(data):\r\n if data.level >0 and data.pauseDrops == False:\r\n if data.time%int(data.levelMax/data.level) == 0 or data.time%6==0:\r\n #increases drops as level increases\r\n xPosition1 = random.randint(0,data.Player1Min-data.buffer)\r\n xPosition2 = random.randint(data.Player1Max+data.buffer,\r\n data.width +data.buffer)\r\n data.coconuts.append(Coconuts(xPosition1,0))\r\n data.coconuts.append(Coconuts(xPosition2,0))\r\n xPosition4 = random.randint(data.Player1Min-data.buffer,\r\n data.Player1Max+data.buffer)\r\n data.coconuts.append(Coconuts(xPosition4,0))\r\n if data.time %5 ==0:\r\n xPosition3 = random.randint(0, data.Player1Min-data.buffer)\r\n data.coconuts.append(Coconuts(xPosition3,0))\r\n if data.time % int(24/data.level) ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n data.coconuts.append(Coconuts(data.Player1Min,0))\r\n elif side ==\"r\":\r\n data.coconuts.append(Coconuts(data.Player1Max,0))\r\n powerUpCoconutShot(data)\r\n \r\ndef powerUpCoconutShot(data):\r\n #adds powerUps\r\n #magic #s toallow for powerups to be added at different times\r\n if data.time % 60 == 0 and data.time%120 !=0:\r\n Position = random.choice(data.spotList)\r\n 
data.powerUps.append(PowerUps(Position,0))\r\n if data.time%50 == 0:\r\n Position = random.choice(data.spotList)\r\n data.invincible.append(Invincible(Position,0))\r\n if data.time %100==0:\r\n Position = random.choice(data.spotList)\r\n data.scaryBug.append(ScaryBug(Position,750))\r\n\r\ndef playerKeyPressed(event,data):\r\n if data.level<data.levelMax and event.keysym == \"r\": init(data)\r\n if (event.keysym == \"Left\") and data.cx>=data.Player1Min+(data.lane/2):\r\n data.cx -=(data.lane)/2\r\n elif(event.keysym == \"Right\") and data.cx<=data.Player1Max:\r\n data.cx +=(data.lane)/2\r\n if data.level >= data.levelMax:\r\n #enter name for scoreboard\r\n if len(event.keysym) ==1:\r\n if len(data.name) <15:\r\n data.name += event.keysym\r\n if event.keysym==\"BackSpace\":\r\n data.name = data.name[0:-1]\r\n if event.keysym == \"Return\":\r\n data.scoreList += ((data.score, data.name))\r\n #saves file\r\n writeFile(\"score.txt\",\r\n data.savedScores+str(data.score)+\",\"+data.name+\"\\n\")\r\n data.mode =\"scoreboard\"\r\n \r\n\r\ndef playerMousePressed(event, data): checkHome(event, data)\r\n\r\ndef playerTimerFired(data):\r\n #actually pauses, and moves drops/player\r\n if data.hit== False and data.level<data.levelMax:\r\n data.cy-=data.speed\r\n if data.time%5 ==0: data.score +=data.level\r\n if data.cy < 15: #basically made it to the top\r\n data.level +=1\r\n data.cy = data.Player1Max + 10\r\n data.speed +=2\r\n if data.cy>40: #so drops you can't see don't hit you\r\n data.time +=1\r\n if data.pauseDrops !=True: coconutShot(data)\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n for powerUp in data.invincible: powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug: bug.onTimerFired(data)\r\n hitScaryBug(data)\r\n for coconut in data.coconuts:\r\n # only want drops to move if not paused\r\n if data.pauseDrops == False: coconut.onTimerFired(data)\r\n if data.beInvincible == False:hit(data)\r\n if 
data.start != None:\r\n if abs(data.start-data.cy) >= 120:\r\n #to limit time for powerups to be active\r\n data.pauseDrops, data.beInvincible = False, False\r\n\r\ndef playerRedrawAll(canvas, data):\r\n # magic #s mainly for screen placement\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconuts: coconut.draw(canvas)\r\n drawPowerups(canvas, data)\r\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\r\n canvas.create_text(data.width/6,50, text =\"Level: %d\" %data.level,\r\n font = \"Arial 18 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/6,80, text =\"Score: %d\" %data.score,\r\n font = \"Arial 18 bold\", fill = \"yellow\")\r\n canvas.create_text(2*data.width/3,660,\r\n text =\"\"\"The greater the level, the more points get\r\n added to your score!\"\"\",\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n if data.hit== True:\r\n canvas.create_rectangle(0,0,data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.deadScreen)\r\n canvas.create_text(data.width/2,data.height/4,\r\n text = \"You Lose! 
Better Luck Next Time!\",\r\n font = \"Helvetica 23 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,280, text =\"Score: %d\" %data.score,\r\n font = \"Arial 13 bold\", fill = \"yellow\")\r\n if data.level >= 8: madeIt(canvas, data)\r\n drawHome(canvas, data)\r\n\r\ndef madeIt(canvas, data):# magic #s mainly for screen placement\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,70, text = \"You Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,100, text =\"Score: %d\" %data.score,\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,375, text =\"Congrats! Enter your Name!\",\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n canvas.create_rectangle(data.width/2 - 50, 400, data.width/2+50, 450,\r\n fill = \"white\")\r\n canvas.create_text(data.width/2, 425, text = data.name)\r\n \r\n \r\n####################################\r\n# 2Player mode\r\n#################################### \r\ndef drop2Player(data):\r\n #adds drops when not paused\r\n #magic #s are position of where drops are starting\r\n if data.winner ==None and data.pauseDrops == False:\r\n if data.time%15==0:\r\n xPosition1 = random.randint(0,385)\r\n if abs(xPosition1 - 100)>25 and abs(xPosition1 - 360)>25:\r\n #so random drops don't interfere with the lane ones\r\n if data.pause1Drop != True:\r\n data.coconuts1.append(Coconuts(xPosition1,0))\r\n if data.pause2Drop != True:\r\n data.coconuts2.append(Coconuts(xPosition1 +410,0))\r\n if data.time % 12 ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n data.coconuts1.append(Coconuts(140,0))\r\n if data.pause2Drop != True:\r\n data.coconuts2.append(Coconuts(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop 
!=True:data.coconuts1.append(Coconuts(344,0))\r\n if data.pause2Drop!=True:data.coconuts2.append(Coconuts(755,0))\r\n powerupDrop2Player(data)\r\n\r\ndef powerupDrop2Player(data):\r\n #adds powerups on both screens (in the same position)\r\n if data.time % 45 == 0 and data.time%90 !=0:\r\n #randomize placement\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop!=True:data.powerUps.append(PowerUps(140,0))\r\n if data.pause2Drop!=True:data.powerUps.append(PowerUps(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop!=True:data.powerUps.append(PowerUps(344,0))\r\n if data.pause2Drop!=True:data.powerUps.append(PowerUps(755,0))\r\n if data.time%60 == 0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n if data.pause1Drop!=True:data.invincible.append(Invincible(140,0))\r\n if data.pause2Drop!=True:data.invincible.append(Invincible(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop!=True:data.invincible.append(Invincible(344,0))\r\n if data.pause2Drop!=True:data.invincible.append(Invincible(755,0))\r\n if data.time %90==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n data.scaryBug.append(ScaryBug(140,750))\r\n data.scaryBug.append(ScaryBug(540,750))\r\n elif side ==\"r\":\r\n data.scaryBug.append(ScaryBug(344,750))\r\n data.scaryBug.append(ScaryBug(755,750))\r\n \r\ndef twoPlayerKeyPressed(event,data):\r\n # controllers for both bugs\r\n if event.keysym == \"r\": init(data)\r\n if data.winner==None:\r\n if (event.keysym == \"a\") and data.onLeft1==False:\r\n data.onLeft1 = True\r\n data.player1X = 150\r\n if(event.keysym == \"d\") and data.onLeft1== True:\r\n data.onLeft1 = False\r\n data.player1X = 330\r\n if (event.keysym == \"Left\") and data.onLeft2==False:\r\n data.onLeft2 = True\r\n data.player2X = 550\r\n if(event.keysym == \"Right\") and data.onLeft2 == True:\r\n data.onLeft2 = False\r\n data.player2X = 750\r\n\r\ndef twoPlayerMousePressed(event, data):\r\n checkHome(event, data)\r\n \r\ndef 
twoPlayerTimerFired(data):\r\n if data.winner == None:\r\n data.player1Y-=data.speed\r\n #<15 signifies that lady bug reached the top\r\n if data.player1Y < 15 and data.player2Y >15:\r\n data.winner= \"player1\"\r\n if data.player1Y>40:\r\n data.time +=1\r\n drop2Player(data)\r\n data.player2Y-=data.speed\r\n if data.player2Y < 15 and data.player1Y> 15:\r\n data.winner= \"player2\"\r\n if data.player2Y>40:\r\n data.time +=1\r\n drop2Player(data)\r\n if data.player1Y < 15 and data.player2Y <15:\r\n data.winner = \"tie\"\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n for powerUp in data.invincible:powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug:bug.onTimerFired(data)\r\n hitScaryBug(data)\r\n powerupTimerFired(data)\r\n\r\ndef powerupTimerFired(data):\r\n for coconut in data.coconuts1:\r\n if data.pause1Drop == False:\r\n coconut.onTimerFired(data)\r\n hit2Player(data)\r\n for coconut in data.coconuts2:\r\n if data.pause2Drop == False:\r\n coconut.onTimerFired(data) \r\n if data.start1 != None:\r\n # to make powerups only active for set amount of time\r\n if abs(data.start1-data.player1Y) >= 120:\r\n data.pause1Drop = False\r\n data.Invincible1 = False\r\n if data.start2 != None:\r\n if abs(data.start2-data.player2Y) >= 120:\r\n data.pause2Drop = False\r\n data.Invincible2 = False\r\n \r\n\r\ndef twoPlayerRedrawAll(canvas, data):\r\n #magic #s for placement on screen\r\n canvas.create_image(data.width/4, data.height/2, image=data.halfBackground)\r\n canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground)\r\n canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconuts1: coconut.draw(canvas)\r\n for coconut in data.coconuts2: coconut.draw(canvas)\r\n drawPowerups(canvas, data)\r\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\r\n canvas.create_image(data.player2X, 
data.player2Y, image=data.ladyBug)\r\n canvas.create_text(50,40, text = \"Player 1\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n canvas.create_text(450,40, text = \"Player 2\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n winner(canvas, data)\r\n drawHome(canvas, data)\r\n\r\ndef winner(canvas, data):\r\n if data.winner== \"player1\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it! Player 1\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"player2\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it! Player 2\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"tie\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"Tie! 
You Both Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n\r\n####################################\r\n# editor mode\r\n####################################\r\n\r\ndef editorKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef editorMousePressed(event, data):\r\n #check for click on button for your speed\r\n checkHome(event, data)\r\n if data.easyY-data.r<= event.y <= data.easyY +data.r:\r\n if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:\r\n data.yourSpeed = \"slow\"\r\n data.slow = data.click\r\n data.medium, data.fast = data.notClick, data.notClick\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.yourSpeed = \"medium\"\r\n data.medium = data.click\r\n data.slow, data.fast = data.notClick, data.notClick\r\n if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:\r\n data.yourSpeed = \"fast\"\r\n data.fast = data.click\r\n data.slow, data.medium = data.notClick, data.notClick\r\n checkMiddle(event, data)\r\n checkLast(event, data)\r\n\r\ndef checkMiddle(event, data):\r\n #check for click on button for rain speed\r\n if data.medX-data.r<= event.y <= data.medX + data.r:\r\n if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:\r\n data.rainSpeed = \"drizzle\"\r\n data.drizzle = data.click\r\n data.rain, data.thunderstorm = data.notClick, data.notClick\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.rainSpeed = \"rain\"\r\n data.rain = data.click\r\n data.drizzle, data.thunderstorm = data.notClick, data.notClick\r\n if data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:\r\n data.rainSpeed = \"thunderstorm\"\r\n data.thunderstorm = data.click\r\n data.drizzle, data.rain = data.notClick, data.notClick\r\n\r\ndef checkLast(event, data):\r\n #check for click on button for powerups\r\n if data.last-data.r<=event.y<= data.last+data.r:\r\n if data.easyY-2*data.r<= event.x<=data.easyY+2*data.r:\r\n data.powerUpsEditor = True\r\n data.yes, data.no = data.click, data.notClick\r\n if 
data.last-2*data.r<= event.x<=data.last+2*data.r:\r\n data.powerUpsEditor = False\r\n data.no, data.yes = data.click, data.notClick\r\n if data.enter == data.click:\r\n if data.enterX-data.r<=event.y<=data.enterX+data.r:\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.mode=\"levelCreated\"\r\n \r\n \r\n\r\ndef drawButtons(canvas, data):\r\n #makes each button\r\n data.font, data.fill = \"Helvetica 13 bold\", \"yellow\"\r\n canvas.create_text(data.medX,data.YST, text= \"Your Speed:\",\r\n font = data.font,fill =data.fill)\r\n canvas.create_image(data.easyX,data.easyY, image = data.slow)\r\n canvas.create_text(data.easyX,data.easyY, text=\"Slow\", font = data.font)\r\n canvas.create_image(data.medX,data.easyY, image = data.medium)\r\n canvas.create_text(data.medX,data.easyY, text=\"Medium\", font = data.font)\r\n canvas.create_image(data.hardX,data.easyY, image = data.fast)\r\n canvas.create_text(data.hardX,data.easyY, text=\"Fast\",font = data.font)\r\n canvas.create_image(data.easyX,data.medX, image = data.drizzle)\r\n canvas.create_text(data.medX,data.RST, text= \"Rain Speed:\",\r\n font = data.font,fill =data.fill)\r\n canvas.create_text(data.easyX,data.medX, text=\"Drizzle\",font = data.font)\r\n canvas.create_image(data.medX,data.medX, image = data.rain)\r\n canvas.create_text(data.medX,data.medX, text=\"Rain\",font = data.font)\r\n canvas.create_image(data.hardX,data.medX, image = data.thunderstorm)\r\n canvas.create_text(data.hardX,data.medX, text=\"Heavy\",font = data.font)\r\n canvas.create_text(data.medX,data.PUT, text= \"PowerUps?\",\r\n font = data.font,fill =data.fill)\r\n canvas.create_image(data.easyY,data.last, image = data.yes)\r\n canvas.create_text(data.easyY,data.last, text=\"Yes\",font = data.font)\r\n canvas.create_image(data.last,data.last, image = data.no)\r\n canvas.create_text(data.last,data.last, text=\"No\",font = data.font)\r\n changeEnter(canvas, data)\r\n\r\ndef changeEnter(canvas, data):\r\n #makes it so the enter 
button respond to click\r\n if data.powerUpsEditor != None and data.yourSpeed != None and \\\r\n data.rainSpeed != None: data.enter = data.click\r\n canvas.create_image(data.medX,data.enterX, image = data.enter)\r\n canvas.create_text(data.medX,data.enterX, text=\"Enter\",font = data.font)\r\n\r\ndef editorTimerFired(data):\r\n data.editorTime += 1\r\n if data.editorTime %2 ==0:\r\n rainDrop(data)\r\n for drop in data.editorDrops:\r\n drop.onTimerFired(data)\r\n\r\ndef rainDrop(data):\r\n #background drops\r\n xPosition = random.randint(0,data.width)\r\n data.editorDrops.append(Coconuts(xPosition,0))\r\n\r\ndef editorRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_image(data.width/2, data.height/2, image=data.tbg)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n canvas.create_text(data.width/2, data.S_P -10, text = \"Edit Your Level!\",\r\n font=\"Arial 23 bold\", fill = \"yellow\")\r\n drawButtons(canvas, data)\r\n drawHome(canvas, data)\r\n####################################\r\n# levelCreated mode\r\n####################################\r\ndef setEverything(data):\r\n #customizing game\r\n if data.yourSpeed == \"slow\": data.speed = 6\r\n elif data.yourSpeed == \"medium\": data.speed = 10\r\n elif data.yourSpeed == \"fast\": data.speed = 14\r\n if data.rainSpeed == \"thunderstorm\": data.rSpeed = 7\r\n elif data.rainSpeed == \"rain\": data.rSpeed = 10\r\n elif data.rainSpeed == \"drizzle\": data.rSpeed = 13\r\n \r\n\r\ndef levelCoconutShot(data):\r\n #adding drops\r\n if data.levelEditorLives >0:\r\n if data.time%int(0.35*data.rSpeed) == 0:\r\n xPosition1 = random.randint(0,data.Player1Min-data.buffer)\r\n xPosition2 = random.randint(770, 870)\r\n xPosition3 = random.randint(220,770)\r\n data.coconuts.append(Coconuts(xPosition3,0))\r\n data.coconuts.append(Coconuts(xPosition1,0))\r\n data.coconuts.append(Coconuts(xPosition2,0))\r\n if data.time % int(0.55*data.rSpeed) ==0:\r\n 
xPosition3 = random.randint(0, 220)\r\n xPosition5 = random.randint(220,770)\r\n data.coconuts.append(Coconuts(xPosition3,0))\r\n data.coconuts.append(Coconuts(xPosition5,0))\r\n if data.time % int(data.rSpeed) ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\": \r\n data.coconuts.append(Coconuts(3*data.width/8-20,0))\r\n elif side ==\"r\":\r\n data.coconuts.append(Coconuts(7*data.width/8+40,0))\r\n xPosition4= random.randint(220,770)\r\n data.coconuts.append(Coconuts(xPosition4,0))\r\n \r\n levelPowerUp(data)\r\n\r\ndef levelPowerUp(data):\r\n # adding power-ups only if clicked yes\r\n if data.powerUpsEditor == True:\r\n if data.time % 20 == 0 and data.time%40 !=0:\r\n Position = random.choice(data.spotList)\r\n data.powerUps.append(PowerUps(Position,0))\r\n if data.time%30 == 0:\r\n Position = random.choice(data.spotList)\r\n data.invincible.append(Invincible(Position,0))\r\n if data.time %35==0:\r\n Position = random.choice(data.spotList)\r\n data.scaryBug.append(ScaryBug(Position,750))\r\n\r\ndef levelCreatedKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n if data.levelEditorLives>0:\r\n if (event.keysym == \"Left\") and data.cx>=317:\r\n data.cx -=(data.lane/2)\r\n elif(event.keysym == \"Right\") and data.cx<=740:\r\n data.cx +=(data.lane/2)\r\n\r\ndef levelCreatedMousePressed(event, data):\r\n checkHome(event, data)\r\n\r\ndef levelCreatedTimerFired(data):\r\n setEverything(data)\r\n if data.levelEditorLives>0:\r\n data.cy-=data.speed\r\n if data.cy < 15:\r\n data.level +=1\r\n if data.cy>40:\r\n data.time +=1\r\n if data.pauseDrops !=True: levelCoconutShot(data)\r\n if data.powerUpsEditor == False:\r\n for coconut in data.coconuts: coconut.onTimerFired(data)\r\n hit(data)\r\n if data.powerUpsEditor == True:\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n for powerUp in data.invincible: powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug: bug.onTimerFired(data)\r\n 
hitScaryBug(data)\r\n for coconut in data.coconuts:\r\n if data.pauseDrops == False:coconut.onTimerFired(data)\r\n if data.beInvincible == False: hit(data)\r\n if data.start != None:\r\n #to make powerups only active for set amount of time\r\n if abs(data.start-data.cy) >= 120:\r\n data.pauseDrops, data.beInvincible = False, False\r\n\r\n\r\ndef levelCreatedRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconuts: coconut.draw(canvas)\r\n if data.powerUpsEditor == True: drawPowerups(canvas, data)\r\n canvas.create_image(data.cx, data.cy, image=data.ladyBug)\r\n canvas.create_text(data.width/6,100,\r\n text =\"Total Lives: %d\" %data.levelEditorLives,\r\n font = \"Arial 20 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2,660,\r\n text =\"\"\"You lose a life for hitting a drop\r\n & don't get eaten!\"\"\",\r\n font = \"Arial 15 bold\", fill = \"yellow\")\r\n if data.levelEditorLives <=0:\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.deadScreen)\r\n canvas.create_text(data.width/2,data.height/4,\r\n text = \"You Lose! 
Better Luck Next Time!\",\r\n font = \"Helvetica 23 bold\", fill = \"yellow\") \r\n if data.level > 1: winEditor(canvas, data)\r\n drawHome(canvas, data)\r\n\r\ndef winEditor(canvas, data):\r\n #screen for when you win\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n\r\n####################################\r\n# AI Difficulty Mode\r\n####################################\r\ndef difficultyKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef drawDifficulties(canvas, data):\r\n canvas.create_text(data.medX,data.AITY, text= \"Computer Difficulty:\",\r\n font=\"Arial 23 bold\", fill = \"yellow\") \r\n canvas.create_image(data.easyX, data.easyY, image=data.slow)\r\n canvas.create_text(data.easyX,data.easyY, text=\"Easy\")\r\n canvas.create_image(data.medX, data.easyY, image=data.medium)\r\n canvas.create_text(data.medX,data.easyY, text=\"Medium\")\r\n canvas.create_image(data.hardX, data.easyY, image=data.fast)\r\n canvas.create_text(data.hardX,data.easyY, text=\"Hard\")\r\n if data.difficulty !=None:\r\n data.enter = data.click\r\n canvas.create_image(data.medX, data.enterY, image=data.enter)\r\n canvas.create_text(data.medX,data.enterY, text=\"Enter\")\r\n\r\ndef difficultyMousePressed(event, data):\r\n #sets up buttons to customize\r\n checkHome(event, data)\r\n if data.easyY-data.r<= event.y <= data.easyY +data.r:\r\n if data.easyX-2*data.r<= event.x<=data.easyX+2*data.r:\r\n data.difficulty = data.difS\r\n data.slow = data.click\r\n data.medium, data.fast = data.notClick, data.notClick\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.difficulty = data.difM\r\n data.medium = data.click\r\n data.slow, data.fast = data.notClick, data.notClick\r\n if 
data.hardX-2*data.r<= event.x<=data.hardX+2*data.r:\r\n data.difficulty = data.difH\r\n data.fast = data.click\r\n data.slow, data.medium = data.notClick, data.notClick\r\n if data.enter == data.click:\r\n if data.enterY-data.r<=event.y<=data.enterY+data.r:\r\n if data.medX-2*data.r<= event.x<=data.medX+2*data.r:\r\n data.mode=\"AI\"\r\n\r\ndef difficultyTimerFired(data):\r\n # makes normal background rain\r\n data.editorTime += 1\r\n if data.editorTime %2 ==0:\r\n rainDrop(data)\r\n for drop in data.editorDrops:\r\n drop.onTimerFired(data)\r\n\r\ndef rainDrop(data):\r\n xPosition = random.randint(0,data.width)\r\n data.editorDrops.append(Coconuts(xPosition,0))\r\n\r\ndef difficultyRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_image(data.width/2, data.height/2, image=data.tbg)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n drawDifficulties(canvas, data)\r\n drawHome(canvas, data)\r\n\r\n####################################\r\n# AI mode\r\n####################################\r\ndef hitAI1(data, distance):\r\n for coconut in data.coconutsAI1:\r\n # so AI switches by itself\r\n if (data.player1Y-data.r - coconut.y<=distance) and \\\r\n data.switchOnProgress == False:\r\n if coconut.x>=data.player1X-data.r and \\\r\n coconut.x<=data.player1X+data.r or AISwitchBug(data,distance)==True:\r\n testInt = random.randint(0,9)\r\n # to have different levels of difficulty\r\n if testInt<= data.difficulty:\r\n data.switchOnProgress= True\r\n if data.player1X == 150:\r\n data.player1X = 340\r\n else:\r\n data.player1X = 150\r\n data.switchOnProgress= False\r\n if coconut.y>=data.player1Y-data.r and coconut.y<=data.player1Y+data.r:\r\n if coconut.x>=data.player1X-data.r and \\\r\n coconut.x<=data.player1X+data.r:\r\n data.player1Y+=50\r\n data.coconutsAI1.remove(coconut)\r\n\r\ndef AISwitchBug(data, distance):\r\n #AI to move for spider\r\n for scaryBug in data.scaryBug:\r\n if 
(data.player1Y-data.r - scaryBug.y<=distance) and \\\r\n data.switchOnProgress == False:\r\n if scaryBug.x>=data.player1X-data.r and \\\r\n scaryBug.x<=data.player1X+data.r:\r\n return True\r\n\r\ndef hitAI2(data, distance):\r\n # check if human controlled player hits drops\r\n for coconut in data.coconutsAI2:\r\n if coconut.y>=data.player2Y-data.r and coconut.y<=data.player2Y+data.r:\r\n if coconut.x>=data.player2X-data.r and \\\r\n coconut.x<=data.player2X+data.r:\r\n data.player2Y+=50 \r\n data.coconutsAI2.remove(coconut)\r\n \r\ndef coconutShotAI(data):\r\n if data.winner ==None:\r\n # randomize position of drops off of tree\r\n if data.time%15==0:\r\n xPosition1 = random.randint(0,385)\r\n if abs(xPosition1 - 100)>40 and abs(xPosition1 - 360)>40:\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(xPosition1,0))\r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(xPosition1 +410,0))\r\n if data.time%8 ==0:\r\n xPosition2 = random.randint(0,80)\r\n xPosition3 = random.randint(364, 385)\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(xPosition2,0))\r\n data.coconutsAI1.append(Coconuts(xPosition3,0)) \r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(xPosition2+410,0))\r\n data.coconutsAI2.append(Coconuts(xPosition3+410,0))\r\n addExtraCoconut(data)\r\n addPowerUpsAI(data)\r\n\r\ndef addExtraCoconut(data):\r\n #adds drops to edges of trees\r\n if data.time % (18) ==0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(140,0))\r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(540,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop != True:\r\n data.coconutsAI1.append(Coconuts(344,0))\r\n if data.pause2Drop != True:\r\n data.coconutsAI2.append(Coconuts(755,0))\r\n if data.time % 37 == 0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n 
data.powerUps.append(PowerUps(140,0))\r\n if data.pause2Drop != True:\r\n data.powerUps.append(PowerUps(550,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop != True:\r\n data.powerUps.append(PowerUps(344,0))\r\n if data.pause2Drop != True:\r\n data.powerUps.append(PowerUps(755,0))\r\n \r\ndef addPowerUpsAI(data):\r\n #randomly add powerups on tree\r\n if data.time%33 == 0:\r\n side = random.choice(data.sides)\r\n if side == \"l\":\r\n if data.pause1Drop != True:\r\n data.invincible.append(Invincible(140,0))\r\n if data.pause2Drop != True:\r\n data.invincible.append(Invincible(550,0))\r\n elif side ==\"r\":\r\n if data.pause1Drop != True:\r\n data.invincible.append(Invincible(344,0))\r\n if data.pause2Drop != True:\r\n data.invincible.append(Invincible(755,0))\r\n if data.time %66==0:\r\n side = random.choice(data.sides) \r\n if side == \"l\":\r\n data.scaryBug.append(ScaryBug(140,750))\r\n data.scaryBug.append(ScaryBug(550,750))\r\n elif side ==\"r\":\r\n data.scaryBug.append(ScaryBug(344,750))\r\n data.scaryBug.append(ScaryBug(750,750))\r\n\r\n \r\ndef AIKeyPressed(event,data):\r\n if event.keysym == \"r\": init(data)\r\n if data.winner==None:\r\n if (event.keysym == \"Left\") and data.onLeft1==False:\r\n data.onLeft1 = True\r\n data.player2X = 550\r\n elif(event.keysym == \"Right\") and data.onLeft1== True:\r\n data.onLeft1 = False\r\n data.player2X = 750\r\n\r\ndef AIMousePressed(event, data): checkHome(event, data)\r\ndef AITimerFired(data):\r\n if data.winner == None:\r\n #want to check hit twice (before & after elements move)\r\n if data.Invincible1 == False:hitAI1(data, 31)\r\n if data.Invincible2 == True: pass\r\n elif data.Invincible2 == False:hitAI2(data, 31)\r\n for coconut in data.coconutsAI1:\r\n if data.pause1Drop == False:coconut.onTimerFired(data)\r\n for coconut in data.coconutsAI2:\r\n if data.pause2Drop == False:coconut.onTimerFired(data)\r\n # second check\r\n if data.Invincible1 == False:hitAI1(data,13)\r\n if data.Invincible2 == True:pass\r\n 
elif data.Invincible2 == False:hitAI2(data,13)\r\n data.player1Y-=data.speedAI\r\n #establishing winer\r\n if data.player1Y < 15 and data.player2Y >15: data.winner= \"player1\"\r\n if data.player1Y>40:\r\n data.time +=1\r\n coconutShotAI(data)\r\n data.player2Y-=data.speedAI\r\n if data.player2Y < 15 and data.player1Y> 15: data.winner= \"player2\" \r\n if data.player2Y>40:\r\n data.time +=1\r\n coconutShotAI(data)\r\n if data.player1Y < 15 and data.player2Y <15: data.winner = \"tie\"\r\n for powerUp in data.powerUps: powerUp.onTimerFired(data)\r\n hitPause(data)\r\n powerUpAITimerFired(data)\r\n\r\ndef powerUpAITimerFired(data):\r\n #moves both sides symmetrically \r\n for powerUp in data.invincible:\r\n powerUp.onTimerFired(data)\r\n hitInvincible(data)\r\n for bug in data.scaryBug:\r\n bug.onTimerFired(data)\r\n hitScaryBug(data)\r\n if data.start1 != None:\r\n if abs(data.start1-data.player1Y) >= 120:\r\n data.pause1Drop = False\r\n data.Invincible1 = False\r\n if data.start2 != None:\r\n if abs(data.start2-data.player2Y) >= 120:\r\n data.pause2Drop = False\r\n data.Invincible2 = False\r\n \r\n\r\n\r\ndef AIRedrawAll(canvas, data):\r\n canvas.create_image(data.width/4, data.height/2, image=data.halfBackground)\r\n canvas.create_image(3*data.width/4, data.height/2,image=data.halfBackground)\r\n canvas.create_line(data.width/2, 0, data.width/2, data.height, width = 10)\r\n canvas.create_line(0,20, data.width, 20)\r\n for coconut in data.coconutsAI1:\r\n coconut.draw(canvas)\r\n for coconut in data.coconutsAI2:\r\n coconut.draw(canvas)\r\n canvas.create_text(50,40, text = \"Computer\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n canvas.create_text(450,40, text = \"Player 1\",font = \"Arial 15 bold\",\r\n fill = \"yellow\")\r\n drawPowerups(canvas, data)\r\n canvas.create_image(data.player1X, data.player1Y, image=data.ladyBug)\r\n canvas.create_image(data.player2X, data.player2Y, image=data.ladyBug)\r\n AIWinner(canvas, data)\r\n drawHome(canvas, 
data)\r\n\r\ndef AIWinner(canvas, data):\r\n if data.winner== \"player1\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"The Computer Won :(\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"player2\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"You Made it! You Won!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n elif data.winner== \"tie\":\r\n canvas.create_rectangle(0,0, data.width, data.height, fill = \"black\")\r\n canvas.create_image(data.width/2, data.height/2, image=data.winScreen)\r\n canvas.create_image(300, 320, image=data.winBug)\r\n canvas.create_text(data.width/2,100, text = \"Tie! 
You Both Made it!\",\r\n font = \"Arial 23 bold\", fill = \"yellow\")\r\n####################################\r\n# ScoreBoard mode\r\n####################################\r\n\r\ndef scoreboardKeyPressed(event, data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef scoreboardMousePressed(event, data): checkHome(event, data)\r\n\r\ndef scoreboardTimerFired(data):\r\n difficultyTimerFired(data)\r\n\r\ndef scoreboardRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.height/2, image=data.background)\r\n canvas.create_image(data.width/2, data.tbgY, image=data.tbg)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n canvas.create_text(data.width/2, data.txtTScore, text=\"Top Scores!\",\r\n font = \"Arial 30 bold\", fill = \"yellow\")\r\n canvas.create_text(data.width/2, data.S_P, text=\"Score_Player\",\r\n font = \"Arial 20 bold\", fill = \"yellow\")\r\n drawHome(canvas, data)\r\n #reads file\r\n data.savedScores\r\n data.savedScores=readFile(\"score.txt\")\r\n score=data.savedScores.splitlines()\r\n scores=[]\r\n for line in score:\r\n scores.append(line.split(\",\"))\r\n #sorts scores to find top 5\r\n scores = sorted(scores, key = lambda x: int(x[0]))\r\n top5 = scores[-data.numScores:]\r\n top5.reverse()\r\n for i in range(len(top5)):\r\n canvas.create_text(data.width/2, data.scoreShift+(i*50),\r\n text = top5[i],\r\n font = \"Arial 18 bold\", fill = \"yellow\")\r\n\r\n####################################\r\n# help mode\r\n####################################\r\n\r\ndef helpKeyPressed(event, data):\r\n if event.keysym == \"r\": init(data)\r\n\r\ndef helpMousePressed(event, data): checkHome(event, data)\r\n\r\ndef helpTimerFired(data):\r\n difficultyTimerFired(data)\r\n\r\ndef helpRedrawAll(canvas, data):\r\n canvas.create_image(data.width/2, data.helpY, image=data.helpScreen)\r\n for drop in data.editorDrops:\r\n drop.draw(canvas)\r\n drawHome(canvas, data)\r\n\r\n#######################################\r\n# use the run function as-is from 
notes\r\n#######################################\r\n\r\ndef run(width=15000, height=25000):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n redrawAll(canvas, data)\r\n canvas.update() \r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def timerFiredWrapper(canvas, data):\r\n timerFired(data)\r\n redrawAllWrapper(canvas, data)\r\n # pause, then call timerFired again\r\n canvas.after(data.timerDelay, timerFiredWrapper, canvas, data)\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n data.timerDelay = 100 # milliseconds\r\n # create the root and the canvas\r\n root = Tk()\r\n init(data)\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n timerFiredWrapper(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\nrun(1000, 1000)\r\n", "step-ids": [ 66, 79, 82, 95, 104 ] }
[ 66, 79, 82, 95, 104 ]
from hicity.graphics.graphics import HiCityGUI def GUI(): app = HiCityGUI() app.mainloop() if __name__ == '__main__': GUI()
normal
{ "blob_id": "dd96b7f73c07bf0c74e6ce4dbff1a9cc09729b72", "index": 7918, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef GUI():\n app = HiCityGUI()\n app.mainloop()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef GUI():\n app = HiCityGUI()\n app.mainloop()\n\n\nif __name__ == '__main__':\n GUI()\n", "step-4": "from hicity.graphics.graphics import HiCityGUI\n\n\ndef GUI():\n app = HiCityGUI()\n app.mainloop()\n\n\nif __name__ == '__main__':\n GUI()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import cv2 import random import os import numpy as np import matplotlib.pyplot as plt import torch from torch.utils.data import Dataset from torchvision import transforms class ShanghaiTechPartA(Dataset): def __init__(self, root, shuffle=False, transform=None, downsample=1): self.root = root self.shuffle = shuffle self.transform = transform self.downsample = downsample self.image_names = [filename for filename in os.listdir(self.root)] self.n_samples = len(self.image_names) if self.shuffle: random.shuffle(self.image_names) def __len__(self): return self.n_samples def __getitem__(self, index): assert index <= len(self), 'index range error' img_name = self.image_names[index] # Read image and normalize its pixels to [0,1] img = plt.imread(os.path.join(self.root,img_name)) / 255 # Expand grayscale image to three channel. if len(img.shape) == 2: img = img[:,:,np.newaxis] img = np.concatenate((img,img,img),2) # Read ground truth density-map density_map = np.load(os.path.join(self.root.replace('images','density_maps'),img_name.replace('.jpg','.npy'))) # Downsample image and density-map to match model's input if self.downsample >1: rows = int(img.shape[0] // self.downsample) cols = int(img.shape[1] // self.downsample) img = cv2.resize(img,(cols*self.downsample, rows*self.downsample)) img = img.transpose((2,0,1)) # convert to order (channel,rows,cols) density_map = cv2.resize(density_map, (cols,rows)) density_map = density_map[np.newaxis,:,:] * self.downsample * self.downsample # transform image and density_map to tensors img_tensor = torch.tensor(img, dtype=torch.float) density_map_tensor = torch.tensor(density_map, dtype=torch.float) # Apply any other transformation if self.transform is not None: img_tensor = self.transform(img_tensor) return img_tensor, density_map_tensor # Test code if __name__== "__main__": root = '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images' dataset = ShanghaiTechPartA(root, transform=transforms.Normalize( mean=[0.485, 
0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8) index = random.randint(0, len(dataset)) img, dmap = dataset[index] print(index, img.shape, dmap.shape)
normal
{ "blob_id": "8a0a98ab072e46463d80d8638c830e6db0032a77", "index": 8101, "step-1": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n <mask token>\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, 
index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * 
self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\nif __name__ == '__main__':\n root = (\n '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n )\n dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[\n 0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n", "step-4": "import cv2\nimport random\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass ShanghaiTechPartA(Dataset):\n\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n img = plt.imread(os.path.join(self.root, img_name)) / 255\n if len(img.shape) == 2:\n img = img[:, :, np.newaxis]\n img = np.concatenate((img, img, img), 2)\n density_map = np.load(os.path.join(self.root.replace('images',\n 'density_maps'), img_name.replace('.jpg', '.npy')))\n if self.downsample > 1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img, (cols * 
self.downsample, rows * self.\n downsample))\n img = img.transpose((2, 0, 1))\n density_map = cv2.resize(density_map, (cols, rows))\n density_map = density_map[np.newaxis, :, :\n ] * self.downsample * self.downsample\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n return img_tensor, density_map_tensor\n\n\nif __name__ == '__main__':\n root = (\n '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n )\n dataset = ShanghaiTechPartA(root, transform=transforms.Normalize(mean=[\n 0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n", "step-5": "import cv2\nimport random\nimport os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\n\nclass ShanghaiTechPartA(Dataset):\n def __init__(self, root, shuffle=False, transform=None, downsample=1):\n self.root = root\n self.shuffle = shuffle\n self.transform = transform\n self.downsample = downsample\n\n self.image_names = [filename for filename in os.listdir(self.root)]\n self.n_samples = len(self.image_names)\n\n if self.shuffle:\n random.shuffle(self.image_names)\n\n def __len__(self):\n return self.n_samples\n\n def __getitem__(self, index):\n assert index <= len(self), 'index range error'\n img_name = self.image_names[index]\n # Read image and normalize its pixels to [0,1]\n img = plt.imread(os.path.join(self.root,img_name)) / 255\n # Expand grayscale image to three channel.\n if len(img.shape) == 2:\n img = img[:,:,np.newaxis]\n img = np.concatenate((img,img,img),2)\n\n # Read ground truth density-map\n density_map = np.load(os.path.join(self.root.replace('images','density_maps'),img_name.replace('.jpg','.npy')))\n\n # Downsample image and 
density-map to match model's input\n if self.downsample >1:\n rows = int(img.shape[0] // self.downsample)\n cols = int(img.shape[1] // self.downsample)\n img = cv2.resize(img,(cols*self.downsample, rows*self.downsample))\n img = img.transpose((2,0,1)) # convert to order (channel,rows,cols)\n density_map = cv2.resize(density_map, (cols,rows))\n density_map = density_map[np.newaxis,:,:] * self.downsample * self.downsample\n # transform image and density_map to tensors\n img_tensor = torch.tensor(img, dtype=torch.float)\n density_map_tensor = torch.tensor(density_map, dtype=torch.float)\n # Apply any other transformation\n if self.transform is not None:\n img_tensor = self.transform(img_tensor)\n\n return img_tensor, density_map_tensor\n\n\n# Test code\nif __name__== \"__main__\":\n root = '/Users/pantelis/Downloads/archive/ShanghaiTech/part_A/train_data/images'\n dataset = ShanghaiTechPartA(root,\n transform=transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n downsample=8)\n index = random.randint(0, len(dataset))\n img, dmap = dataset[index]\n print(index, img.shape, dmap.shape)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# -*- coding: utf-8 -*-
"""
    helpers
    ~~~~~~~
    Implements various helper functions.

    :copyright: (c) 2016 by Patrick Spencer.
    :license: Apache 2.0, see LICENSE for more details.
"""

from datetime import datetime, timedelta
import calendar

def month_bounds(year, month):
    """
    Return a tuple of datetime objects (month_start, month_end) for the given
    year and month.

    Accepts ints or numeric strings (e.g. "2016" and "02"): callers may pass
    zero-padded month strings, and int() handles the leading zero safely.

    :param year: four digit year, e.g. "2016" or 2016
    :param month: month number, e.g. 2 for February, 11 for November
    :raises ValueError: if year/month are not numeric or month is out of range
    """
    year = int(year)
    month = int(month)
    # Build the first day of the month directly; equivalent to the old
    # strptime('%s,%s,1' % ...) round-trip but without string formatting.
    month_start = datetime(year, month, 1)
    # calendar.monthrange returns (weekday_of_first_day, number_of_days);
    # only the day count is needed -- it already accounts for leap years.
    days_in_month = calendar.monthrange(year, month)[1]
    month_end = month_start + timedelta(days=days_in_month - 1)
    return (month_start, month_end)
normal
{ "blob_id": "4c5416582afb3cfeb56259954cda2701ea26f8cd", "index": 7780, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef month_bounds(year, month):\n \"\"\"\n Returns a tuple of datetime objects (month_start,month_end) given a year and month.\n Both params are strings because we want month to be a two digit month representation\n and python doesn't handle leading zeros in integers as we want.\n\n :param year: four digit year as a string e.g. \"2016\"\n :param month: 2 digit month as a string e.g. 2 for February, 11 for November\n \"\"\"\n year = int(year)\n month = int(month)\n month_start = datetime.strptime('%s,%s,1' % (year, month), '%Y,%m,%d')\n days_in_month = calendar.monthrange(year, month)\n month_end = month_start + timedelta(days=days_in_month[1] - 1)\n return month_start, month_end\n", "step-3": "<mask token>\nfrom datetime import datetime, timedelta\nimport calendar\n\n\ndef month_bounds(year, month):\n \"\"\"\n Returns a tuple of datetime objects (month_start,month_end) given a year and month.\n Both params are strings because we want month to be a two digit month representation\n and python doesn't handle leading zeros in integers as we want.\n\n :param year: four digit year as a string e.g. \"2016\"\n :param month: 2 digit month as a string e.g. 
2 for February, 11 for November\n \"\"\"\n year = int(year)\n month = int(month)\n month_start = datetime.strptime('%s,%s,1' % (year, month), '%Y,%m,%d')\n days_in_month = calendar.monthrange(year, month)\n month_end = month_start + timedelta(days=days_in_month[1] - 1)\n return month_start, month_end\n", "step-4": "# -*- coding: utf-8 -*-\n\"\"\"\n helpers\n ~~~~~~~\n Implements various helper functions.\n\n :copyright: (c) 2016 by Patrick Spencer.\n :license: Apache 2.0, see LICENSE for more details.\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport calendar\n\ndef month_bounds(year, month):\n \"\"\"\n Returns a tuple of datetime objects (month_start,month_end) given a year and month.\n Both params are strings because we want month to be a two digit month representation\n and python doesn't handle leading zeros in integers as we want.\n\n :param year: four digit year as a string e.g. \"2016\"\n :param month: 2 digit month as a string e.g. 2 for February, 11 for November\n \"\"\"\n year = int(year)\n month = int(month)\n month_start = datetime.strptime('%s,%s,1' % (year, month),'%Y,%m,%d')\n # days_in_month returns a tuple(weekday, days) where\n # weekday is the eekday the month starts on and days is the number of days in the month\n days_in_month = calendar.monthrange(year,month)\n month_end = month_start + timedelta(days=days_in_month[1]-1)\n return (month_start, month_end)\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# coding: utf-8 from sqlalchemy import Column, DateTime, Integer, String from sqlalchemy.schema import FetchedValue from application import db class BmExam(db.Model): __tablename__ = 'bm_exam' id = db.Column(db.Integer, primary_key=True) status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue()) exam_id = db.Column(db.Integer, nullable=False) exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue()) show_exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue()) numbers = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue()) x_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue()) m_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue()) rule_status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue()) start_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue()) end_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue()) beizhu = db.Column(db.String(2000), nullable=False, server_default=db.FetchedValue()) beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue()) beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue()) updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue()) created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())
normal
{ "blob_id": "6be2cc99d03596715d76cda41d63b8c91c829498", "index": 2211, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass BmExam(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db\n .FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False,\n server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.\n FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n", "step-4": "from sqlalchemy import Column, DateTime, Integer, String\nfrom sqlalchemy.schema import FetchedValue\nfrom 
application import db\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db\n .FetchedValue())\n show_exam_name = db.Column(db.String(200), nullable=False,\n server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.\n FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.\n FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.\n FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.\n FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.\n FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db\n .FetchedValue())\n", "step-5": "# coding: utf-8\nfrom sqlalchemy import Column, DateTime, Integer, String\nfrom sqlalchemy.schema import FetchedValue\nfrom application import db\n\n\nclass BmExam(db.Model):\n __tablename__ = 'bm_exam'\n\n id = db.Column(db.Integer, primary_key=True)\n status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n exam_id = db.Column(db.Integer, nullable=False)\n exam_name = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n show_exam_name = db.Column(db.String(200), 
nullable=False, server_default=db.FetchedValue())\n numbers = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n x_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())\n m_rules = db.Column(db.String(1000), nullable=False, server_default=db.FetchedValue())\n rule_status = db.Column(db.Integer, nullable=False, server_default=db.FetchedValue())\n start_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n end_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n beizhu = db.Column(db.String(2000), nullable=False, server_default=db.FetchedValue())\n beizhu2 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n beizhu3 = db.Column(db.String(200), nullable=False, server_default=db.FetchedValue())\n updated_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n created_time = db.Column(db.DateTime, nullable=False, server_default=db.FetchedValue())\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import requests
import toml
from pathlib import Path


def parseTex(lines: list):
    """Parse raw TeX source lines into URL-safe snippets.

    Returns a list of (snippet, inline) tuples where `inline` is True when
    the line was delimited by single `$ ... $` (as opposed to `$$ ... $$`).
    Blank lines are skipped.
    """
    new_lines = []
    for line in lines:
        if line == "\n":
            continue
        # Single leading `$` (but not `$$`) marks inline math.  startswith
        # avoids the IndexError the old line[1] check hit on a bare "$" line.
        inline = line.startswith("$") and not line.startswith("$$")
        # Strip delimiters/newlines and escape characters that are not
        # URL-safe for the codecogs renderer.
        line = line.replace("$", "")
        line = line.replace("\n", "")
        line = line.replace(" ", "&space;")
        line = line.replace("+", "&plus;")
        new_lines.append((line, inline))
    return new_lines


def addColor(lines: list, color: str):
    """Wrap each parsed snippet in a codecogs color command.

    Inline snippets additionally get the `\\inline` prefix so they render at
    text size.  `color` is an "R, G, B" string, e.g. "0, 0, 0".
    """
    # Raw strings: identical bytes to the old literals, but no reliance on
    # "\c"/"\i" happening not to be escape sequences.
    colortag = r"{\color[RGB]{" + color + "}"
    return [
        (r"\inline" + colortag + text + "}") if inline else (colortag + text + "}")
        for text, inline in lines
    ]


def _load_config() -> dict:
    """Read config.toml if present; fall back to a single black output."""
    default = {"colors": ["0, 0, 0"], "outputs": [""]}
    if Path("config.toml").exists():
        with open("config.toml", "r") as loadconfig:
            loaded = toml.load(loadconfig)
        return loaded if loaded else default
    return default


def main():
    """Render each line of tex.txt as an SVG per configured color."""
    config = _load_config()
    with open("tex.txt", "r") as tex:
        # (snippet, inline) pairs
        imgs = parseTex(tex.readlines())
    for i, color in enumerate(config["colors"]):
        coloredimgs = addColor(imgs, color)
        output = "output" / Path(config["outputs"][i])
        # parents/exist_ok: the old bare mkdir() raised when 'output/' was
        # missing or when the script was re-run against an existing dir.
        output.mkdir(parents=True, exist_ok=True)
        for j, snippet in enumerate(coloredimgs):
            link = "https://latex.codecogs.com/svg.latex?" + snippet
            print(link)
            r = requests.get(link)
            with open(output / ("latex" + str(j) + ".svg"), "wb") as svg:
                svg.write(r.content)


if __name__ == "__main__":
    # Guard so importing this module no longer performs network/file I/O.
    main()
normal
{ "blob_id": "dbd04f7b88fa43ae920a6744e3979dbf917d3fc6", "index": 7649, "step-1": "<mask token>\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '&plus;')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\n<mask token>\n", "step-2": "<mask token>\nimgs: list\nconfig: dict\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '&plus;')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\nif Path('config.toml').exists():\n with open('config.toml', 'r') as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nelse:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nwith open('tex.txt', 'r') as tex:\n imgs = tex.readlines()\n<mask token>\nfor i, color in enumerate(config['colors']):\n coloredimgs = addColor(imgs, color)\n output = 'output' / Path(config['outputs'][i])\n if not output.exists():\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = 'https://latex.codecogs.com/svg.latex?' 
+ tex\n print(link)\n r = requests.get(link)\n with open(output / ('latex' + str(j) + '.svg'), 'wb') as svg:\n svg.write(r.content)\n", "step-3": "<mask token>\nimgs: list\nconfig: dict\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '&plus;')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\nif Path('config.toml').exists():\n with open('config.toml', 'r') as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nelse:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nwith open('tex.txt', 'r') as tex:\n imgs = tex.readlines()\nimgs = parseTex(imgs)\nfor i, color in enumerate(config['colors']):\n coloredimgs = addColor(imgs, color)\n output = 'output' / Path(config['outputs'][i])\n if not output.exists():\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = 'https://latex.codecogs.com/svg.latex?' 
+ tex\n print(link)\n r = requests.get(link)\n with open(output / ('latex' + str(j) + '.svg'), 'wb') as svg:\n svg.write(r.content)\n", "step-4": "import requests\nimport toml\nfrom pathlib import Path\nimgs: list\nconfig: dict\n\n\ndef parseTex(lines: list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == '\\n':\n continue\n inline = False\n if line[0] == '$' and line[1] != '$':\n inline = True\n line = line.replace('$', '')\n line = line.replace('\\n', '')\n line = line.replace(' ', '&space;')\n line = line.replace('+', '&plus;')\n new_lines.append((line, inline))\n return new_lines\n\n\ndef addColor(lines: list, color: str):\n colortag = '{\\\\color[RGB]{' + color + '}'\n return [('\\\\inline' + colortag + line[0] + '}' if line[1] else colortag +\n line[0] + '}') for line in lines]\n\n\nif Path('config.toml').exists():\n with open('config.toml', 'r') as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nelse:\n config = {'colors': ['0, 0, 0'], 'outputs': ['']}\nwith open('tex.txt', 'r') as tex:\n imgs = tex.readlines()\nimgs = parseTex(imgs)\nfor i, color in enumerate(config['colors']):\n coloredimgs = addColor(imgs, color)\n output = 'output' / Path(config['outputs'][i])\n if not output.exists():\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = 'https://latex.codecogs.com/svg.latex?' 
+ tex\n print(link)\n r = requests.get(link)\n with open(output / ('latex' + str(j) + '.svg'), 'wb') as svg:\n svg.write(r.content)\n", "step-5": "import requests\nimport toml\nfrom pathlib import Path\n\nimgs:list\nconfig:dict\n\ndef parseTex(lines:list):\n new_lines = []\n for i, line in enumerate(lines):\n if line == \"\\n\":\n continue\n\n inline = False\n if (line[0] == \"$\" and line[1] != \"$\"):\n inline = True\n line = line.replace(\"$\", \"\")\n line = line.replace(\"\\n\", \"\")\n line = line.replace(\" \", \"&space;\")\n line = line.replace(\"+\", \"&plus;\")\n new_lines.append((line, inline))\n return new_lines\n\ndef addColor(lines:list, color:str):\n colortag = \"{\\color[RGB]{\" + color + \"}\"\n return [\"\"\"\\inline\"\"\" + colortag + line[0] + \"}\" if(line[1]) else colortag + line[0] + \"}\" for line in lines]\n\n\n\n\nif Path(\"config.toml\").exists():\n with open(\"config.toml\", \"r\") as loadconfig:\n config = toml.load(loadconfig)\n if config == {}:\n config = {\"colors\": [\"0, 0, 0\"], \"outputs\": [\"\"]}\nelse:\n config = {\"colors\": [\"0, 0, 0\"], \"outputs\": [\"\"]}\n\nwith open(\"tex.txt\", \"r\") as tex:\n imgs = tex.readlines()\n\nimgs = parseTex(imgs) #returns a list of tuples, [0] is the parsed text, [1] is an inline boolean\nfor i, color in enumerate(config[\"colors\"]):\n coloredimgs = addColor(imgs, color)\n output = \"output\" / Path(config[\"outputs\"][i])\n if (not output.exists()):\n output.mkdir()\n for j, tex in enumerate(coloredimgs):\n link = \"https://latex.codecogs.com/svg.latex?\" + tex\n print(link)\n r = requests.get(link)\n with open(output / (\"latex\" + str(j) + \".svg\"), \"wb\") as svg:\n svg.write(r.content)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
""" 进程对象属性 """ from multiprocessing import Process import time def tm(): for i in range(3): print(time.ctime()) time.sleep(2) p = Process(target=tm,name='Tarena') # 设置子进程随父进程退出 p.daemon = True p.start() print("Name:",p.name) # 进程名称 print("PID:",p.pid) # 进程PID print("is alive:",p.is_alive()) # 是否在生命周期
normal
{ "blob_id": "9d7bc2d93b855fbd22a4707a6237ac51069beb53", "index": 9385, "step-1": "<mask token>\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\n<mask token>\np.start()\nprint('Name:', p.name)\nprint('PID:', p.pid)\nprint('is alive:', p.is_alive())\n", "step-3": "<mask token>\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\np = Process(target=tm, name='Tarena')\np.daemon = True\np.start()\nprint('Name:', p.name)\nprint('PID:', p.pid)\nprint('is alive:', p.is_alive())\n", "step-4": "<mask token>\nfrom multiprocessing import Process\nimport time\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\np = Process(target=tm, name='Tarena')\np.daemon = True\np.start()\nprint('Name:', p.name)\nprint('PID:', p.pid)\nprint('is alive:', p.is_alive())\n", "step-5": "\"\"\"\n进程对象属性\n\"\"\"\n\nfrom multiprocessing import Process\nimport time\n\n\ndef tm():\n for i in range(3):\n print(time.ctime())\n time.sleep(2)\n\n\np = Process(target=tm,name='Tarena')\n\n# 设置子进程随父进程退出\np.daemon = True\n\np.start()\nprint(\"Name:\",p.name) # 进程名称\nprint(\"PID:\",p.pid) # 进程PID\nprint(\"is alive:\",p.is_alive()) # 是否在生命周期", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django.db import models


class Event(models.Model):
    """Django ORM model describing a single event listing."""

    # NOTE(review): date/start_time/end_time are stored as free-form text
    # rather than Date/Time fields -- presumably to match an existing schema
    # or frontend format; confirm before changing.
    name = models.TextField()                        # event title
    host = models.TextField(null=True)               # organizer (optional)
    fields = models.TextField(null=True)             # extra form fields -- TODO confirm serialization format
    description = models.TextField(null=True)        # long description (optional)
    date = models.TextField()                        # event date as text
    start_time = models.TextField()                  # start time as text
    end_time = models.TextField()                    # end time as text
    banner_path = models.TextField(null=True)        # path/URL to banner image
    status = models.IntegerField()                   # status flag -- semantics defined by callers
    reg = models.IntegerField()                      # registration flag/count -- verify against callers
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)      # refreshed on every save
normal
{ "blob_id": "170716ccaaf45db2ee974de260883a8d70513f52", "index": 7583, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Event(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Event(models.Model):\n name = models.TextField()\n host = models.TextField(null=True)\n fields = models.TextField(null=True)\n description = models.TextField(null=True)\n date = models.TextField()\n start_time = models.TextField()\n end_time = models.TextField()\n banner_path = models.TextField(null=True)\n status = models.IntegerField()\n reg = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n", "step-4": "from django.db import models\n\n\nclass Event(models.Model):\n name = models.TextField()\n host = models.TextField(null=True)\n fields = models.TextField(null=True)\n description = models.TextField(null=True)\n date = models.TextField()\n start_time = models.TextField()\n end_time = models.TextField()\n banner_path = models.TextField(null=True)\n status = models.IntegerField()\n reg = models.IntegerField()\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Layer implementations for a from-scratch neural network:
# fully-connected (FC_layer), 2-D convolution (conv2D) and 1-D convolution
# (conv1D, truncated at the end of this chunk).
# NOTE(review): activation functions from the project-local `activations`
# module are called as f(layer, x), i.e. they take the layer instance as
# their first argument -- confirm against activations.py.
import numpy as np
import math
import activations


class FC_layer():
    """Fully-connected layer with manually implemented backprop."""

    def __init__(self, input_size, output_size, weight_init_range, activation, debug):
        self.type = "FC"
        self.activation_name = activation
        self.shape = (input_size, output_size)
        # Activation f and its derivative f', resolved by name
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        # Caches filled by forward() and consumed by backward()
        self.input = None
        self.output = None
        # Weights drawn uniformly from weight_init_range; bias in [0, 1)
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size=(input_size, output_size))
        self.bias = np.random.rand(1,output_size)
        # Gradient accumulators, zeroed again after each update_gradients()
        self.weights_grads = np.zeros(self.weights.shape)
        self.bias_grads = np.zeros(self.bias.shape)
        self.debug = debug

    def forward(self, input_activations):
        """Forward pass: activation(x . W + b); caches input and activated output."""
        # Dot product of input with W plus bias. Cache, activate and return
        output = np.dot(input_activations, self.weights) + self.bias
        # Cache the inputs (pre-activation cache is disabled below)
        #self.output = output
        self.input = input_activations
        # Pass the output through the activation function
        output = self.activation(self, output)
        self.output = output
        return output

    def backward(self, jacobian_L_Z):
        """Backward pass: accumulate weight/bias gradients and return J_L_Y.

        jacobian_L_Z is the jacobian of the loss w.r.t. this layer's output,
        received from the downstream layer.
        """
        # PURPOSE: Calculate the weights gradients, the bias gradient and the input_loss
        # that will be passed to the previous activation layer and so on, up to layer previous input
        Y = self.input
        # Create the jacobian J_Z_sum with the layer cached outputs and the derivative of activation function
        jacobian_Z_sum = self.create_jacobian_Z_sum()
        # Find the Weights gradients jacobian_L_W:
        # first the simple jacobian linking the outputs and the weights
        simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())
        # then the jacobian linking the loss to the weights
        jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W
        # Calculate the input layer loss jacobian_L_Y by dot product of the
        # output loss and the transposed weight matrix
        jacobian_Z_Y = np.dot(jacobian_Z_sum ,self.weights.T)
        jacobian_L_Y = np.dot( jacobian_L_Z, jacobian_Z_Y)
        # Bias loss equals the output loss: the bias influence on the loss
        # == layer activation output influence on the loss
        jacobian_L_B = jacobian_L_Z
        # Accumulate gradients; they are applied at end of batch (or per SGD step)
        self.weights_grads = self.weights_grads + jacobian_L_W
        self.bias_grads = self.bias_grads + jacobian_L_B
        # Return the input loss --> this is the output loss of the upstream layer
        return jacobian_L_Y

    def create_jacobian_Z_sum(self):
        """Diagonal jacobian of activation over the cached (activated) output."""
        return np.identity(self.output[0].size) * self.d_activation(self, self.output)

    def update_gradients(self, learning_rate, gradient_avg_factor = 1):
        """Apply accumulated gradients and reset the accumulators.

        gradient_avg_factor is kept for backward compatibility; the division
        by batch size was removed and folded into the learning rate.
        """
        #self.weights_grads = self.weights_grads / gradient_avg_factor
        #self.bias_grads = self.bias_grads / gradient_avg_factor
        # Update weights and biases
        self.weights -= learning_rate * self.weights_grads
        self.bias -= learning_rate * self.bias_grads
        self.weights_grads = np.zeros(self.weights.shape)
        self.bias_grads = np.zeros(self.bias.shape)

    def __str__(self):
        return "FC Layer type size = " + str(self.weights.shape) + " with activation = " + self.activation_name


class conv2D():
    """2-D convolution layer with manually implemented backprop.

    Weight gradients and upstream jacobians are computed via a precomputed
    map (cached_calculation) from (weight position, input position) pairs to
    the output position they contribute to.
    """

    def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes, weight_init_range, activation, debug):
        self.type = "conv2D"
        self.input_shape = input_shape
        self.activation_name = activation
        # Kernel stack shape for the layer (N, I, K_x, K_y)
        self.kernel_shape = (n_kernels, input_shape[0], kernel_shape[0], kernel_shape[1])
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        self.strides = strides
        # padding modes per axis: "full" / "same" / anything else = no padding
        self.modes = modes
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
        self.weights_grads = np.zeros(self.weights.shape)
        self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = self.calculate_padding()
        self.output_shape = self.calculate_output_shape()
        # (weight_pos, input_pos) -> output_pos lookup, built once below
        self.cached_calculation = {}
        self.cache_weights_input_output_triplet_locations()
        self.cached_output = None
        self.debug = debug
        '''print("###########################")
        a = np.random.randint(1,4,(6,6))
        print(a)
        padded_a = self.apply_zero_padding(a)
        print(padded_a)
        print("kernel shape", (self.kernel_shape[2], self.kernel_shape[3]))
        print("input shape", a.shape)
        print("padded shape", padded_a.shape)
        print("###########################")'''

    def cache_weights_input_output_triplet_locations(self):
        """Precompute the (weight, input) -> output coordinate map.

        Only 2-D coordinates are stored: the map is identical for every
        kernel stack / channel, so a single placeholder channel suffices.
        """
        placeholder_input = np.zeros(self.input_shape)
        array = placeholder_input[0]
        kernel = self.weights[0][0]
        stride_x_pointer = 0
        # while the kernel does not go over the x-axis of the array
        while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
            stride_y_pointer = 0
            # while the kernel does not go over the y-axis of the array
            while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
                # cache every (weight position, input position) pair touched
                # at this kernel placement
                for row in range(kernel.shape[0]):
                    for column in range(kernel.shape[1]):
                        # format: key ((weight_x_pos, weight_y_pos), (input_x_pos, input_y_pos)) ---> (output_x_pos, output_y_pos)
                        conv_output_coordinate = (stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
                        self.cached_calculation[((row, column), (row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate
                # Update the stride along the y-axis
                stride_y_pointer += self.strides[1]
            # update the stride along the x-axis
            stride_x_pointer += self.strides[0]
        # End of convolution

    def forward(self, input_feature_maps):
        """Forward pass: pad input, convolve every kernel stack, activate.

        Caches the padded input and pre-activation output for backward().
        """
        #self.cached_calculation = {}
        output = np.zeros(self.output_shape)
        # Apply padding
        input_feature_maps = self.apply_zero_padding(input_feature_maps)
        for i in range(0, self.kernel_shape[0]):
            # for each kernel stack
            kernel_stack = self.weights[i]
            for j in range(0, self.kernel_shape[1]):
                # for each kernel in the kernel stack (i.e. input channel)
                kernel = kernel_stack[j]
                array = input_feature_maps[j]
                stride_x_pointer = 0
                conv_counter = 1
                if self.debug:
                    print("**** NEW CONVOLUTION ****")
                # while the kernel does not go over the x-axis of the array
                while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):
                    stride_y_pointer = 0
                    # while the kernel does not go over the y-axis of the array
                    while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):
                        # Get the snip of the array to apply convolution on
                        array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0], stride_y_pointer: stride_y_pointer + kernel.shape[1]]
                        # apply convolution (elementwise product + sum)
                        result = np.sum(np.multiply(array_snip, kernel))
                        # accumulate into the output feature map i
                        conv_output_coordinate = (i, stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])
                        output[conv_output_coordinate] += result
                        '''#cache all the results, touched weights and input for each kernel (output or Coordinates??)
                        for row in range(kernel.shape[0]):
                            for column in range(kernel.shape[1]):
                                # Cache coordinate only: (weight, input) --> output
                                #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)
                                self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate #Cache weight coordinate and input/output values
                        #ALTERNATIVE
                        # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val
                        #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result'''
                        if self.debug:
                            print("convolution nr ", conv_counter )
                            print("\narray_snip: \n", array_snip)
                            print("\nkernel: \n", kernel)
                            print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel))
                            print("\nresult: ", result)
                        # Update the stride along the y-axis
                        stride_y_pointer += self.strides[1]
                        conv_counter+=1
                    # update the stride along the x-axis
                    stride_x_pointer += self.strides[0]
                # End of convolution
                if self.debug:
                    print("\n----REVIEW----\n")
                    print("Total convolutions: ", conv_counter)
                    print("\ninput_feature_map:\n ", array)
                    print("\napplied kernel:\n ", kernel)
                    print("\nconvolution result:\n ", output[i])
                    print("***********************************")
        # Cache (padded) input and pre-activation output for backward()
        self.cached_output = output
        self.cached_input = input_feature_maps
        # Apply activation
        output = self.activation(self, output)
        return output

    def backward(self, jacobian_L_Z):
        """Backward pass: accumulate weight gradients, return J_L_Y upstream."""
        # Reshape J_LZ from the downstream (possibly FC) layer back to this
        # layer's output shape, then apply the activation derivative:
        # J_L_Z * f'(cached_output)
        jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)
        #print("JLZ før relu\n", jacobian_L_Z)
        #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)
        #print("cached out after activation\n", self.cached_output)
        jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)
        #print("JLZ etter relu\n", jacobian_L_Z)
        # Calculate J_LW and accumulate
        jacobian_L_W = self.compute_gradients(jacobian_L_Z)
        self.weights_grads += jacobian_L_W
        # Calculate J_LX (loss w.r.t. this layer's input)
        jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)
        # Pass Jacobian L Y upstream
        return jacobian_L_Y

    def update_gradients(self, learning_rate):
        """Apply accumulated weight gradients and reset the accumulator."""
        self.weights -= learning_rate * self.weights_grads
        self.weights_grads = np.zeros(self.weights.shape)

    def compute_gradients(self, jacobian_L_Z):
        """Weight gradients: sum input * output-loss over every cached
        (weight, input) -> output triplet."""
        grads = np.zeros(self.weights.shape)
        # Iterate through all the weights (4 dimensions):
        # kernel stacks i, channels j, kernel rows k, kernel cols l
        for i in range(self.weights.shape[0]):
            for j in range(self.weights.shape[1]):
                for k in range(self.weights.shape[2]):
                    for l in range(self.weights.shape[3]):
                        # every input position this weight touched
                        for key in self.cached_calculation.keys():
                            if key[0] == (k,l):
                                grads[(i,j,k,l)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]
        return grads

    def compute_J_LY(self, jacobian_L_Z):
        """Input jacobian: sum weight * output-loss over every cached triplet,
        across all kernel stacks."""
        jacobian_L_Y = np.zeros(self.input_shape)
        # Iterate through all the inputs (3 dimensions):
        # channels i, rows j, cols k
        for i in range(self.input_shape[0]):
            for j in range(self.input_shape[1]):
                for k in range(self.input_shape[2]):
                    for key in self.cached_calculation.keys():
                        if key[1] == (j,k):
                            # for each kernel-stack
                            for l in range(self.weights.shape[0]):
                                jacobian_L_Y[(i,j,k)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]
        return jacobian_L_Y

    def calculate_output_shape(self):
        """Output shape (n_kernels, W_out, H_out) from input, kernel, stride and padding."""
        width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.strides[0] + 1)
        height = math.floor((self.input_shape[2] - self.kernel_shape[3] + self.p_y_start + self.p_y_stop)/self.strides[1] + 1 )
        return (self.kernel_shape[0], width, height)

    def calculate_padding(self):
        """Per-axis start/stop padding sizes for the configured modes."""
        # Calculate padding along the x axis
        s = self.strides[0]
        f = self.kernel_shape[2]
        i = self.input_shape[1]
        if self.modes[0] == "full":
            # Every pixel must experience every weight of the kernel
            p_x_start = f - 1
            p_x_stop = f - 1
        elif self.modes[0] == "same":
            # Every pixel must experience the middle weight of the kernel
            p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
            p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
        else:
            # "valid": no padding
            p_x_start = 0
            p_x_stop = 0
        # Calculate padding along the y axis (same scheme)
        s = self.strides[1]
        f = self.kernel_shape[3]
        i = self.input_shape[2]
        if self.modes[1] == "full":
            # Every pixel must experience every weight of the kernel
            p_y_start = f - 1
            p_y_stop = f - 1
        elif self.modes[1] == "same":
            # Every pixel must experience the middle weight of the kernel
            p_y_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)
            p_y_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)
        else:
            p_y_start = 0
            p_y_stop = 0
        return p_x_start, p_x_stop, p_y_start, p_y_stop

    def apply_zero_padding(self, input_feature_maps):
        """Zero-pad every channel according to the precomputed padding sizes."""
        padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop ))
        for channel in range(input_feature_maps.shape[0]):
            array = input_feature_maps[channel]
            # Create the zero background, then copy the array into its middle
            padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))
            padded_array[self.p_x_start:array.shape[0]+ self.p_x_start, self.p_y_start:array.shape[1]+ self.p_y_start] = array
            padded_input_feature_maps[channel] = padded_array
        return padded_input_feature_maps

    def __str__(self):
        return "Conv 2D Layer type with "+ str(self.kernel_shape[0]) +" kernels of shape = " + str(self.kernel_shape[1:]) +"input/output of shape" + str(self.input_shape)+"/" + str(self.output_shape) + " strides= s" + str(self.strides) + " modes= " + str(self.modes) +" with activation = " + self.activation_name


class conv1D():
    """1-D convolution layer (definition continues past this chunk)."""

    def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode, weight_init_range, activation, debug):
        self.type = "conv1D"
        self.input_shape = input_shape
        self.activation_name = activation
        # Kernel stack shape for the layer (Num_kernel_stacks, Channels, Kernel_x)
        self.kernel_shape = (n_kernels, input_shape[0], kernel_shape)
        self.activation = activations.get_activation_function(activation)
        self.d_activation = activations.get_activation_derivative(activation)
        self.stride = stride
        self.mode = mode
        self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)
        self.weights_grads = np.zeros(self.weights.shape)
        self.p_x_start, self.p_x_stop = self.calculate_padding()
        self.output_shape = 
self.calculate_output_shape() self.cached_calculation = {} self.cache_weights_input_output_triplet_locations() self.cached_output = None self.debug = debug def cache_weights_input_output_triplet_locations(self): #Performe an empty convolution and cache all the position of the kernel, input and output triplet placeholder_input = np.zeros(self.input_shape) array = placeholder_input[0] kernel = self.weights[0][0] stride_x_pointer = 0 while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1): #while the kernel does not go over the x-akse of the array #cache all touched weights and input for each kernel for column in range(kernel.shape[0]): # Cache coordinate only: (weight, input) --> output #format: key ((weight_x_pos), (input_x_pos)) ---> (output_x_pos) conv_output_coordinate = (stride_x_pointer // self.stride) self.cached_calculation[(column, column + stride_x_pointer)] = conv_output_coordinate #Cache weight coordinate and input/output values #update the stride long the x-axis stride_x_pointer += self.stride #End of convolution def forward(self, input_feature_maps): output = np.zeros(self.output_shape) #Apply padding input_feature_maps = self.apply_zero_padding(input_feature_maps) for i in range(0, self.kernel_shape[0]): #for each kernel stack kernel_stack = self.weights[i] for j in range(0, self.kernel_shape[1]): #for each kernel in the kernel stack (or input channel) kernel = kernel_stack[j] array = input_feature_maps[j] stride_x_pointer = 0 conv_counter = 1 if self.debug: print("**** NEW CONVOLUTION ****") while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1): #while the kernel does not go over the x-akse of the array #Get the snip of the array to apply convolution on array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0]] #apply convolution and get the result result = np.sum(np.multiply(array_snip, kernel)) #update the output tensor conv_output_coordinate = (i, stride_x_pointer // self.stride) 
output[conv_output_coordinate] += result if self.debug: print("convolution nr ", conv_counter ) print("\narray_snip: \n", array_snip) print("\nkernel: \n", kernel) print("\nelementwise multiplication: \n", np.multiply(array_snip, kernel)) print("\nresult: ", result) conv_counter+=1 #update the stride long the x-axis stride_x_pointer += self.stride #End of convolution if self.debug: print("\n----REVIEW----\n") print("Total convolutions: ", conv_counter) print("\ninput_feature_map:\n ", array) print("\napplied kernel:\n ", kernel) print("\nconvolution result:\n ", output[i]) print("***********************************") #Cache input and output self.cached_output = output self.cached_input = input_feature_maps #Apply activation output = self.activation(self, output) return output def backward(self, jacobian_L_Z): #Reshape J_LZ from FC to Conv2D and pass through activation layer jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape) #print("JLZ før relu\n", jacobian_L_Z) #jacobian_L_Z = self.d_activation(self, jacobian_L_Z) #print("cached out after activation\n", self.cached_output) jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output) #print("JLZ etter relu\n", jacobian_L_Z) # J_L_Z * f'(cached_output) #Calculate J_LW jacobian_L_W = self.compute_gradients(jacobian_L_Z) self.weights_grads += jacobian_L_W #Calculate J_LX jacobian_L_Y = self.compute_J_LY(jacobian_L_Z) #Pass Jacobian L Y upstream return jacobian_L_Y def update_gradients(self, learning_rate): self.weights -= learning_rate * self.weights_grads self.weights_grads = np.zeros(self.weights.shape) def compute_gradients(self, jacobian_L_Z): grads = np.zeros(self.weights.shape) #Iterate through all the weights (3 dimension) for i in range(self.weights.shape[0]): for j in range(self.weights.shape[1]): for k in range(self.weights.shape[2]): for key in self.cached_calculation.keys(): if key[0] == k: grads[(i,j,k)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]] 
return grads def compute_J_LY(self, jacobian_L_Z): jacobian_L_Y = np.zeros(self.input_shape) #Iterate through all the inputs (3 dimension) #iterate through all channels/kernel of a kernel stack for i in range(self.input_shape[0]): #iterate through x-akses of 1d input for j in range(self.input_shape[1]): for key in self.cached_calculation.keys(): if key[1] == j: #for each kernel-stack for l in range(self.weights.shape[0]): jacobian_L_Y[(i,j)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]] return jacobian_L_Y def calculate_output_shape(self): width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.stride + 1) return (self.kernel_shape[0], width) def calculate_padding(self): #Calculate padding long the x axis s = self.stride f = self.kernel_shape[2] i = self.input_shape[1] if self.mode == "full": #Every pixel must experience every weight of the kernel p_x_start = f - 1 p_x_stop = f - 1 elif self.mode == "same": #Every pixel must experience the middle weight of the kernel p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2) p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2) else: p_x_start = 0 p_x_stop = 0 return p_x_start, p_x_stop def apply_zero_padding(self, input_feature_maps): # Apply zero padding to the input feature maps according to the modes, strides and kernel size #if self.p_x_start == 0 and self.p_x_stop == 0: # return input_feature_maps padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop)) for channel in range(input_feature_maps.shape[0]): array = input_feature_maps[channel] #Create the background zero array padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop)) #Copy the array in the middle of the zero background padded_array[self.p_x_start:array.shape[0]+ self.p_x_start] = array #Save the array padded_input_feature_maps[channel] = padded_array return padded_input_feature_maps def 
__str__(self): return "Conv 1D Layer type with "+ str(self.kernel_shape[0]) +" kernels of shape = " + str(self.kernel_shape[1:]) +"input/output of shape" + str(self.input_shape)+"/" + str(self.output_shape) + " stride= " + str(self.stride) + " mode= " + str(self.mode) +" with activation = " + self.activation_name class softmax(): def __init__(self, size): self.size = size self.shape = (1, size) self.type = "softmax" self.activation_function = activations.softmax def forward(self, input_data): return self.activation_function(self, input_data) def backward(self, jacobian_L_S, softmaxed_network_output): # Create jacobian of derivate of softmax jacobian_soft = self.compute_j_soft(softmaxed_network_output) # Compute jacobian linking Loss to output jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft) return jacobian_L_Z def compute_j_soft(self, S): S = np.squeeze(S) n = len(S) j_soft = np.zeros((n,n)) for i in range(n): for j in range(n): if i == j: j_soft[i][j] = S[i] - S[i]**2 else: j_soft[i][j] = -S[i]*S[j] return j_soft def __str__(self): return "Softmax Layer of size = " + str(self.size)
normal
{ "blob_id": "ff99b5fd168d7987e488d7f6d0455619e988f15a", "index": 3574, "step-1": "<mask token>\n\n\nclass conv2D:\n <mask token>\n <mask token>\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n 
print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass 
conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n 
conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += 
self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return 
jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n", "step-2": "<mask token>\n\n\nclass conv2D:\n <mask token>\n <mask token>\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 
2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in 
range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + 
stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = 
np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + 
str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n", "step-3": "<mask token>\n\n\nclass conv2D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,\n weight_init_range, activation, debug):\n self.type = 'conv2D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0\n ], kernel_shape[1]\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self\n .calculate_padding())\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n 
self.cached_output = None\n self.debug = debug\n \"\"\"print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n print(\"###########################\")\"\"\"\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n conv_output_coordinate = (stride_x_pointer // self.\n strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[(row, column), (row +\n stride_x_pointer, column + stride_y_pointer)\n ] = conv_output_coordinate\n stride_y_pointer += self.strides[1]\n stride_x_pointer += self.strides[0]\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // 
self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, 
learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] +\n self.p_y_start + self.p_y_stop) / self.strides[1] + 1)\n return self.kernel_shape[0], width, height\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], 
kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', 
np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n 
def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of 
size = ' + str(self.size)\n", "step-4": "<mask token>\n\n\nclass FC_layer:\n <mask token>\n <mask token>\n\n def backward(self, jacobian_L_Z):\n Y = self.input\n jacobian_Z_sum = self.create_jacobian_Z_sum()\n simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())\n jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W\n jacobian_Z_Y = np.dot(jacobian_Z_sum, self.weights.T)\n jacobian_L_Y = np.dot(jacobian_L_Z, jacobian_Z_Y)\n jacobian_L_B = jacobian_L_Z\n self.weights_grads = self.weights_grads + jacobian_L_W\n self.bias_grads = self.bias_grads + jacobian_L_B\n return jacobian_L_Y\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'FC Layer type size = ' + str(self.weights.shape\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv2D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes,\n weight_init_range, activation, debug):\n self.type = 'conv2D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape[0\n ], kernel_shape[1]\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = (self\n .calculate_padding())\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n \"\"\"print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n 
print(\"###########################\")\"\"\"\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1] - 1 <= array.shape[1] - 1:\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n conv_output_coordinate = (stride_x_pointer // self.\n strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[(row, column), (row +\n stride_x_pointer, column + stride_y_pointer)\n ] = conv_output_coordinate\n stride_y_pointer += self.strides[1]\n stride_x_pointer += self.strides[0]\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n stride_y_pointer = 0\n while stride_y_pointer + kernel.shape[1\n ] - 1 <= array.shape[1] - 1:\n array_snip = array[stride_x_pointer:\n stride_x_pointer + kernel.shape[0],\n stride_y_pointer:stride_y_pointer + kernel.shape[1]\n ]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = (i, stride_x_pointer //\n self.strides[0], stride_y_pointer // self.\n strides[1])\n output[conv_output_coordinate] += result\n \"\"\"#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 
2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result\"\"\"\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n stride_y_pointer += self.strides[1]\n conv_counter += 1\n stride_x_pointer += self.strides[0]\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for l in 
range(self.weights.shape[3]):\n for key in self.cached_calculation.keys():\n if key[0] == (k, l):\n grads[i, j, k, l] += self.cached_input[j][key\n [1]] * jacobian_L_Z[i][self.\n cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for k in range(self.input_shape[2]):\n for key in self.cached_calculation.keys():\n if key[1] == (j, k):\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j, k] += self.weights[l][i][key\n [0]] * jacobian_L_Z[l][self.\n cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] +\n self.p_y_start + self.p_y_stop) / self.strides[1] + 1)\n return self.kernel_shape[0], width, height\n\n def calculate_padding(self):\n s = self.strides[0]\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.modes[0] == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.modes[0] == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n s = self.strides[1]\n f = self.kernel_shape[3]\n i = self.input_shape[2]\n if self.modes[1] == 'full':\n p_y_start = f - 1\n p_y_stop = f - 1\n elif self.modes[1] == 'same':\n p_y_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_y_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_y_start = 0\n p_y_stop = 0\n return p_x_start, p_x_stop, p_y_start, p_y_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, \n input_feature_maps.shape[2] + self.p_y_start + 
self.p_y_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self\n .p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start,\n self.p_y_start:array.shape[1] + self.p_y_start] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return 'Conv 2D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' strides= s' + str(self.strides\n ) + ' modes= ' + str(self.modes\n ) + ' with activation = ' + self.activation_name\n\n\nclass conv1D:\n\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode,\n weight_init_range, activation, debug):\n self.type = 'conv1D'\n self.input_shape = input_shape\n self.activation_name = activation\n self.kernel_shape = n_kernels, input_shape[0], kernel_shape\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high=\n weight_init_range[1], size=self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1:\n for column in range(kernel.shape[0]):\n 
conv_output_coordinate = stride_x_pointer // self.stride\n self.cached_calculation[column, column + stride_x_pointer\n ] = conv_output_coordinate\n stride_x_pointer += self.stride\n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print('**** NEW CONVOLUTION ****')\n while stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0\n ] - 1:\n array_snip = array[stride_x_pointer:stride_x_pointer +\n kernel.shape[0]]\n result = np.sum(np.multiply(array_snip, kernel))\n conv_output_coordinate = i, stride_x_pointer // self.stride\n output[conv_output_coordinate] += result\n if self.debug:\n print('convolution nr ', conv_counter)\n print('\\narray_snip: \\n', array_snip)\n print('\\nkernel: \\n', kernel)\n print('\\nelementwise multiplication: \\n', np.\n multiply(array_snip, kernel))\n print('\\nresult: ', result)\n conv_counter += 1\n stride_x_pointer += self.stride\n if self.debug:\n print('\\n----REVIEW----\\n')\n print('Total convolutions: ', conv_counter)\n print('\\ninput_feature_map:\\n ', array)\n print('\\napplied kernel:\\n ', kernel)\n print('\\nconvolution result:\\n ', output[i])\n print('***********************************')\n self.cached_output = output\n self.cached_input = input_feature_maps\n output = self.activation(self, output)\n return output\n\n def backward(self, jacobian_L_Z):\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.\n cached_output)\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n return jacobian_L_Y\n\n def update_gradients(self, learning_rate):\n 
self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[i, j, k] += self.cached_input[j][key[1]\n ] * jacobian_L_Z[i][self.cached_calculation\n [key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n for i in range(self.input_shape[0]):\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[i, j] += self.weights[l][i][key[0]\n ] * jacobian_L_Z[l][self.cached_calculation\n [key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] +\n self.p_x_start + self.p_x_stop) / self.stride + 1)\n return self.kernel_shape[0], width\n\n def calculate_padding(self):\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == 'full':\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == 'same':\n p_x_start = math.floor((s * math.ceil(i / s) - i + f - s) / 2)\n p_x_stop = math.ceil((s * math.ceil(i / s) - i + f - s) / 2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n\n def apply_zero_padding(self, input_feature_maps):\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], \n input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n padded_array = np.zeros(array.shape[0] + self.p_x_start + self.\n p_x_stop)\n padded_array[self.p_x_start:array.shape[0] + self.p_x_start\n ] = array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n 
def __str__(self):\n return 'Conv 1D Layer type with ' + str(self.kernel_shape[0]\n ) + ' kernels of shape = ' + str(self.kernel_shape[1:]\n ) + 'input/output of shape' + str(self.input_shape) + '/' + str(\n self.output_shape) + ' stride= ' + str(self.stride\n ) + ' mode= ' + str(self.mode\n ) + ' with activation = ' + self.activation_name\n\n\nclass softmax:\n\n def __init__(self, size):\n self.size = size\n self.shape = 1, size\n self.type = 'softmax'\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n jacobian_soft = self.compute_j_soft(softmaxed_network_output)\n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n, n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i] ** 2\n else:\n j_soft[i][j] = -S[i] * S[j]\n return j_soft\n\n def __str__(self):\n return 'Softmax Layer of size = ' + str(self.size)\n", "step-5": "import numpy as np\nimport math\nimport activations\n\nclass FC_layer():\n def __init__(self, input_size, output_size, weight_init_range, activation, debug):\n self.type = \"FC\"\n self.activation_name = activation\n self.shape = (input_size, output_size)\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.input = None\n self.output = None\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size=(input_size, output_size))\n self.bias = np.random.rand(1,output_size)\n self.weights_grads = np.zeros(self.weights.shape)\n self.bias_grads = np.zeros(self.bias.shape)\n self.debug = debug\n\n def forward(self, input_activations):\n # Dot product of input with W plus bias. 
Cache, activate and return\n output = np.dot(input_activations, self.weights) + self.bias\n # Cache the weighted outputs and inputs\n #self.output = output\n self.input = input_activations\n # Pass the output throug the activation function\n output = self.activation(self, output)\n self.output = output\n return output\n \n def backward(self, jacobian_L_Z):\n # Get the jacobian linking the loss with respect of this layer output from the previous layer.\n # PURPOSE: Calculate the weights gradients, the bias gradient and the input_loss\n # that will be passed to the previous activation layer and so on, up to layer previous input\n Y = self.input\n # Create the jacobian J_Z_sum with the layer cached outputs and the derivative of activation function\n jacobian_Z_sum = self.create_jacobian_Z_sum()\n\n # Find the Weights gradients jacobian_L_W\n # Compute the simple jacobian linking the outputs and the weights\n simp_jacobian_Z_W = np.outer(Y, jacobian_Z_sum.diagonal())\n # Then compute the jacobian linking the loss to the weights\n jacobian_L_W = jacobian_L_Z * simp_jacobian_Z_W\n\n # Calculate the input layer loss jacobian_L_Y\n # by doing dot product of output layer loss and the weigths matrix transposed (so to invert M N to N M, where M < N, we go the other way around)\n jacobian_Z_Y = np.dot(jacobian_Z_sum ,self.weights.T)\n jacobian_L_Y = np.dot( jacobian_L_Z, jacobian_Z_Y)\n \n\n # Bias loss is the as the output loss --> the bias influence on the loss == layer activation output influence on the loss\n jacobian_L_B = jacobian_L_Z\n\n # Now save the bias loss and weight loss (representing the calculated gradiants).\n # This will be updated at the end of the batch, or SGD\n self.weights_grads =self.weights_grads + jacobian_L_W\n self.bias_grads = self.bias_grads + jacobian_L_B\n \n #Finally return the calculated input loss --> this will be the output loss of the next layer\n return jacobian_L_Y\n\n def create_jacobian_Z_sum(self):\n return 
np.identity(self.output[0].size) * self.d_activation(self, self.output)\n\n def update_gradients(self, learning_rate, gradient_avg_factor = 1):\n #Update gradients, usefull when doing batch learning\n # Get the avg of the gradients (for SGD divide by 1, else divide by batchsize)\n ## UPDATE: removed the division by batchsize: Implemented this factor in the learning rate\n #self.weights_grads = self.weights_grads / gradient_avg_factor\n #self.bias_grads = self.bias_grads / gradient_avg_factor\n\n # Update weights and biases\n self.weights -= learning_rate * self.weights_grads\n self.bias -= learning_rate * self.bias_grads\n self.weights_grads = np.zeros(self.weights.shape)\n self.bias_grads = np.zeros(self.bias.shape)\n\n\n def __str__(self):\n return \"FC Layer type size = \" + str(self.weights.shape) + \" with activation = \" + self.activation_name\n\nclass conv2D():\n def __init__(self, input_shape, n_kernels, kernel_shape, strides, modes, weight_init_range, activation, debug):\n self.type = \"conv2D\"\n self.input_shape = input_shape\n self.activation_name = activation\n #Kernel stack shape for the layer (N, I, K_x, K_y)\n self.kernel_shape = (n_kernels, input_shape[0], kernel_shape[0], kernel_shape[1])\n self.activation = activations.get_activation_function(activation)\n self.d_activation = activations.get_activation_derivative(activation)\n self.strides = strides\n self.modes = modes\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop, self.p_y_start, self.p_y_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n \n \n '''print(\"###########################\")\n a = np.random.randint(1,4,(6,6))\n print(a)\n padded_a = self.apply_zero_padding(a)\n 
print(padded_a)\n print(\"kernel shape\", (self.kernel_shape[2], self.kernel_shape[3]))\n print(\"input shape\", a.shape)\n print(\"padded shape\", padded_a.shape)\n print(\"###########################\")'''\n\n def cache_weights_input_output_triplet_locations(self):\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n stride_y_pointer = 0\n #while the kernel does not go over the x-akse of the array\n while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):\n #while the kernel does not go over the x-akse of the array\n #cache all touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((weight_x_pos, weight_y_pos), (input_x_pos, input_y_pos)) ---> (output_x_pos, output_y_pos)\n conv_output_coordinate = (stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])\n self.cached_calculation[((row, column), (row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n # Update the stride long the y-axis\n stride_y_pointer += self.strides[1]\n #update the stride long the x-axis\n stride_x_pointer += self.strides[0]\n #End of convolution\n \n\n def forward(self, input_feature_maps):\n #reset the cached calculations from the previous forward pass\n #self.cached_calculation = {}\n output = np.zeros(self.output_shape)\n #Apply padding\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n #for each kernel stack\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n #for each kernel in the kernel stack (or input channel)\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer 
= 0\n conv_counter = 1\n if self.debug:\n print(\"**** NEW CONVOLUTION ****\")\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n stride_y_pointer = 0\n #while the kernel does not go over the x-akse of the array\n while(stride_y_pointer + kernel.shape[1] -1 <= array.shape[1] - 1):\n #while the kernel does not go over the x-akse of the array\n #Get the snip of the array to apply convolution on\n array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0], stride_y_pointer: stride_y_pointer + kernel.shape[1]]\n #apply convolution and get the result \n result = np.sum(np.multiply(array_snip, kernel)) \n #update the output tensor\n conv_output_coordinate = (i, stride_x_pointer // self.strides[0], stride_y_pointer // self.strides[1])\n output[conv_output_coordinate] += result\n '''#cache all the results, touched weights and input for each kernel (output or Coordinates??)\n for row in range(kernel.shape[0]):\n for column in range(kernel.shape[1]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), (input_channel, input_x_pos, input_y_pos)) ---> (feature_map_number, output_x_pos, output_y_pos)\n self.cached_calculation[((i, j, row, column), (j, row + stride_x_pointer , column + stride_y_pointer))] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #ALTERNATIVE\n # format: key ((kernel_stack_number, 2D_kernel_number, weight_x_pos, weight_y_pos), input_val) ---> output_val\n #self.cached_calculation[((i, j, row, column), array_snip[row, column])] = result'''\n if self.debug:\n print(\"convolution nr \", conv_counter )\n print(\"\\narray_snip: \\n\", array_snip)\n print(\"\\nkernel: \\n\", kernel)\n print(\"\\nelementwise multiplication: \\n\", np.multiply(array_snip, kernel))\n print(\"\\nresult: \", result)\n # Update the stride long the y-axis\n stride_y_pointer += self.strides[1]\n conv_counter+=1\n #update the stride long 
the x-axis\n stride_x_pointer += self.strides[0]\n #End of convolution\n if self.debug:\n print(\"\\n----REVIEW----\\n\")\n print(\"Total convolutions: \", conv_counter)\n print(\"\\ninput_feature_map:\\n \", array)\n print(\"\\napplied kernel:\\n \", kernel)\n print(\"\\nconvolution result:\\n \", output[i])\n print(\"***********************************\")\n #Cache input and output\n self.cached_output = output\n self.cached_input = input_feature_maps\n #Apply activation\n output = self.activation(self, output)\n return output\n \n \n def backward(self, jacobian_L_Z):\n #Reshape J_LZ from FC to Conv2D and pass through activation layer\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n #print(\"JLZ før relu\\n\", jacobian_L_Z)\n #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)\n #print(\"cached out after activation\\n\", self.cached_output)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)\n #print(\"JLZ etter relu\\n\", jacobian_L_Z)\n # J_L_Z * f'(cached_output)\n\n #Calculate J_LW\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n\n #Calculate J_LX\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n\n #Pass Jacobian L Y upstream\n return jacobian_L_Y\n \n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n #Iterate through all the weights (4 dimension)\n #Iterate through the kernel stacks\n for i in range(self.weights.shape[0]):\n #Iterate throught each kernel/input channel\n for j in range(self.weights.shape[1]):\n #iterate through the x-axis of the kernel\n for k in range(self.weights.shape[2]):\n #iterate through the y-axis of the kernel\n for l in range(self.weights.shape[3]):\n #cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}\n for key in 
self.cached_calculation.keys():\n if key[0] == (k,l):\n grads[(i,j,k,l)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n #Iterate through all the inputs (3 dimension)\n #iterate through all channels/kernel of a kernel stack\n for i in range(self.input_shape[0]):\n #iterate through x-akses of 2d input\n for j in range(self.input_shape[1]):\n #iterate through y-axes of 2d input\n for k in range(self.input_shape[2]):\n #cached_data = {k: v for k,v in self.cached_calculation.items() if k[0] == (i,j,k,l)}\n for key in self.cached_calculation.keys():\n if key[1] == (j,k):\n #for each kernel-stack\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[(i,j,k)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]\n return jacobian_L_Y\n \n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.strides[0] + 1)\n height = math.floor((self.input_shape[2] - self.kernel_shape[3] + self.p_y_start + self.p_y_stop)/self.strides[1] + 1 )\n return (self.kernel_shape[0], width, height)\n\n def calculate_padding(self):\n #Calculate padding long the x axis\n s = self.strides[0]\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.modes[0] == \"full\":\n #Every pixel must experience every weight of the kernel\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.modes[0] == \"same\":\n #Every pixel must experience the middle weight of the kernel\n p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_x_start = 0\n p_x_stop = 0\n\n\n #Calculate padding long y axis\n s = self.strides[1]\n f = self.kernel_shape[3]\n i = self.input_shape[2]\n if self.modes[1] == \"full\":\n #Every pixel must experience every weight of the kernel\n p_y_start = f - 1\n p_y_stop = f - 1\n elif 
self.modes[1] == \"same\":\n #Every pixel must experience the middle weight of the kernel\n p_y_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_y_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_y_start = 0\n p_y_stop = 0\n\n\n return p_x_start, p_x_stop, p_y_start, p_y_stop\n \n def apply_zero_padding(self, input_feature_maps):\n # Apply zero padding to the input feature maps according to the modes, strides and kernel size\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop, input_feature_maps.shape[2] + self.p_y_start + self.p_y_stop ))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n #Create the background zero array\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop, array.shape[1] + self.p_y_start + self.p_y_stop))\n #Copy the array in the middle of the zero background\n padded_array[self.p_x_start:array.shape[0]+ self.p_x_start, self.p_y_start:array.shape[1]+ self.p_y_start] = array \n #Save the array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return \"Conv 2D Layer type with \"+ str(self.kernel_shape[0]) +\" kernels of shape = \" + str(self.kernel_shape[1:]) +\"input/output of shape\" + str(self.input_shape)+\"/\" + str(self.output_shape) + \" strides= s\" + str(self.strides) + \" modes= \" + str(self.modes) +\" with activation = \" + self.activation_name\n\nclass conv1D():\n def __init__(self, input_shape, n_kernels, kernel_shape, stride, mode, weight_init_range, activation, debug):\n self.type = \"conv1D\"\n self.input_shape = input_shape\n self.activation_name = activation\n #Kernel stack shape for the layer (Num_kernel_stacks, Channels, Kernel_x)'\n self.kernel_shape = (n_kernels, input_shape[0], kernel_shape)\n self.activation = activations.get_activation_function(activation)\n self.d_activation = 
activations.get_activation_derivative(activation)\n self.stride = stride\n self.mode = mode\n self.weights = np.random.uniform(low=weight_init_range[0], high= weight_init_range[1], size= self.kernel_shape)\n self.weights_grads = np.zeros(self.weights.shape)\n self.p_x_start, self.p_x_stop = self.calculate_padding()\n self.output_shape = self.calculate_output_shape()\n self.cached_calculation = {}\n self.cache_weights_input_output_triplet_locations()\n self.cached_output = None\n self.debug = debug\n\n def cache_weights_input_output_triplet_locations(self):\n #Performe an empty convolution and cache all the position of the kernel, input and output triplet\n placeholder_input = np.zeros(self.input_shape)\n array = placeholder_input[0]\n kernel = self.weights[0][0]\n stride_x_pointer = 0\n while(stride_x_pointer + kernel.shape[0] - 1 <= array.shape[0] - 1):\n #while the kernel does not go over the x-akse of the array\n #cache all touched weights and input for each kernel\n for column in range(kernel.shape[0]):\n # Cache coordinate only: (weight, input) --> output\n #format: key ((weight_x_pos), (input_x_pos)) ---> (output_x_pos)\n conv_output_coordinate = (stride_x_pointer // self.stride)\n self.cached_calculation[(column, column + stride_x_pointer)] = conv_output_coordinate\n #Cache weight coordinate and input/output values\n #update the stride long the x-axis\n stride_x_pointer += self.stride\n #End of convolution\n \n\n def forward(self, input_feature_maps):\n output = np.zeros(self.output_shape)\n #Apply padding\n input_feature_maps = self.apply_zero_padding(input_feature_maps)\n for i in range(0, self.kernel_shape[0]):\n #for each kernel stack\n kernel_stack = self.weights[i]\n for j in range(0, self.kernel_shape[1]):\n #for each kernel in the kernel stack (or input channel)\n kernel = kernel_stack[j]\n array = input_feature_maps[j]\n stride_x_pointer = 0\n conv_counter = 1\n if self.debug:\n print(\"**** NEW CONVOLUTION ****\")\n while(stride_x_pointer + 
kernel.shape[0] - 1 <= array.shape[0] - 1):\n #while the kernel does not go over the x-akse of the array\n #Get the snip of the array to apply convolution on\n array_snip = array[stride_x_pointer: stride_x_pointer + kernel.shape[0]]\n #apply convolution and get the result \n result = np.sum(np.multiply(array_snip, kernel)) \n #update the output tensor\n conv_output_coordinate = (i, stride_x_pointer // self.stride)\n output[conv_output_coordinate] += result\n if self.debug:\n print(\"convolution nr \", conv_counter )\n print(\"\\narray_snip: \\n\", array_snip)\n print(\"\\nkernel: \\n\", kernel)\n print(\"\\nelementwise multiplication: \\n\", np.multiply(array_snip, kernel))\n print(\"\\nresult: \", result)\n conv_counter+=1\n #update the stride long the x-axis\n stride_x_pointer += self.stride\n #End of convolution\n if self.debug:\n print(\"\\n----REVIEW----\\n\")\n print(\"Total convolutions: \", conv_counter)\n print(\"\\ninput_feature_map:\\n \", array)\n print(\"\\napplied kernel:\\n \", kernel)\n print(\"\\nconvolution result:\\n \", output[i])\n print(\"***********************************\")\n #Cache input and output\n self.cached_output = output\n self.cached_input = input_feature_maps\n #Apply activation\n output = self.activation(self, output)\n return output\n \n \n def backward(self, jacobian_L_Z):\n #Reshape J_LZ from FC to Conv2D and pass through activation layer\n jacobian_L_Z = jacobian_L_Z.reshape(self.output_shape)\n #print(\"JLZ før relu\\n\", jacobian_L_Z)\n #jacobian_L_Z = self.d_activation(self, jacobian_L_Z)\n #print(\"cached out after activation\\n\", self.cached_output)\n jacobian_L_Z = jacobian_L_Z * self.d_activation(self, self.cached_output)\n #print(\"JLZ etter relu\\n\", jacobian_L_Z)\n # J_L_Z * f'(cached_output)\n\n #Calculate J_LW\n jacobian_L_W = self.compute_gradients(jacobian_L_Z)\n self.weights_grads += jacobian_L_W\n\n #Calculate J_LX\n jacobian_L_Y = self.compute_J_LY(jacobian_L_Z)\n\n #Pass Jacobian L Y upstream\n return 
jacobian_L_Y\n \n def update_gradients(self, learning_rate):\n self.weights -= learning_rate * self.weights_grads\n self.weights_grads = np.zeros(self.weights.shape)\n\n def compute_gradients(self, jacobian_L_Z):\n grads = np.zeros(self.weights.shape)\n #Iterate through all the weights (3 dimension)\n for i in range(self.weights.shape[0]):\n for j in range(self.weights.shape[1]):\n for k in range(self.weights.shape[2]):\n for key in self.cached_calculation.keys():\n if key[0] == k:\n grads[(i,j,k)] += self.cached_input[j][key[1]] * jacobian_L_Z[i][self.cached_calculation[key]]\n return grads\n\n def compute_J_LY(self, jacobian_L_Z):\n jacobian_L_Y = np.zeros(self.input_shape)\n #Iterate through all the inputs (3 dimension)\n #iterate through all channels/kernel of a kernel stack\n for i in range(self.input_shape[0]):\n #iterate through x-akses of 1d input\n for j in range(self.input_shape[1]):\n for key in self.cached_calculation.keys():\n if key[1] == j:\n #for each kernel-stack\n for l in range(self.weights.shape[0]):\n jacobian_L_Y[(i,j)] += self.weights[l][i][key[0]] * jacobian_L_Z[l][self.cached_calculation[key]]\n return jacobian_L_Y\n\n def calculate_output_shape(self):\n width = math.floor((self.input_shape[1] - self.kernel_shape[2] + self.p_x_start + self.p_x_stop)/self.stride + 1)\n return (self.kernel_shape[0], width)\n\n def calculate_padding(self):\n #Calculate padding long the x axis\n s = self.stride\n f = self.kernel_shape[2]\n i = self.input_shape[1]\n if self.mode == \"full\":\n #Every pixel must experience every weight of the kernel\n p_x_start = f - 1\n p_x_stop = f - 1\n elif self.mode == \"same\":\n\n #Every pixel must experience the middle weight of the kernel\n p_x_start = math.floor((s*math.ceil(i/s)-i+f-s)/2)\n p_x_stop = math.ceil((s*math.ceil(i/s)-i+f-s)/2)\n else:\n p_x_start = 0\n p_x_stop = 0\n return p_x_start, p_x_stop\n \n def apply_zero_padding(self, input_feature_maps):\n # Apply zero padding to the input feature maps according 
to the modes, strides and kernel size\n #if self.p_x_start == 0 and self.p_x_stop == 0:\n # return input_feature_maps\n padded_input_feature_maps = np.zeros((input_feature_maps.shape[0], input_feature_maps.shape[1] + self.p_x_start + self.p_x_stop))\n for channel in range(input_feature_maps.shape[0]):\n array = input_feature_maps[channel]\n #Create the background zero array\n padded_array = np.zeros((array.shape[0] + self.p_x_start + self.p_x_stop))\n #Copy the array in the middle of the zero background\n padded_array[self.p_x_start:array.shape[0]+ self.p_x_start] = array \n #Save the array\n padded_input_feature_maps[channel] = padded_array\n return padded_input_feature_maps\n\n def __str__(self):\n return \"Conv 1D Layer type with \"+ str(self.kernel_shape[0]) +\" kernels of shape = \" + str(self.kernel_shape[1:]) +\"input/output of shape\" + str(self.input_shape)+\"/\" + str(self.output_shape) + \" stride= \" + str(self.stride) + \" mode= \" + str(self.mode) +\" with activation = \" + self.activation_name\n\nclass softmax():\n def __init__(self, size):\n self.size = size\n self.shape = (1, size)\n self.type = \"softmax\"\n self.activation_function = activations.softmax\n\n def forward(self, input_data):\n return self.activation_function(self, input_data)\n\n def backward(self, jacobian_L_S, softmaxed_network_output):\n # Create jacobian of derivate of softmax\n jacobian_soft = self.compute_j_soft(softmaxed_network_output) \n # Compute jacobian linking Loss to output \n jacobian_L_Z = np.dot(jacobian_L_S, jacobian_soft)\n return jacobian_L_Z\n\n def compute_j_soft(self, S):\n S = np.squeeze(S)\n n = len(S)\n j_soft = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n if i == j:\n j_soft[i][j] = S[i] - S[i]**2\n else:\n j_soft[i][j] = -S[i]*S[j]\n return j_soft\n\n def __str__(self):\n return \"Softmax Layer of size = \" + str(self.size)\n\n", "step-ids": [ 24, 25, 28, 33, 39 ] }
[ 24, 25, 28, 33, 39 ]
''' mock_proto.py ''' from heron.common.src.python import constants import heron.proto.execution_state_pb2 as protoEState import heron.proto.physical_plan_pb2 as protoPPlan import heron.proto.tmaster_pb2 as protoTmaster import heron.proto.topology_pb2 as protoTopology # pylint: disable=no-self-use, missing-docstring class MockProto(object): ''' Mocking Proto''' topology_name = "mock_topology_name" topology_id = "mock_topology_id" cluster = "mock_topology_cluster" environ = "mock_topology_environ" def create_mock_spout(self, spout_name, output_streams, spout_parallelism): spout = protoTopology.Spout() spout.comp.name = spout_name kv = spout.comp.config.kvs.add() kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE') kv.value = str(spout_parallelism) for stream in output_streams: spout.outputs.add().stream.CopyFrom(stream) return spout def create_mock_bolt(self, bolt_name, input_streams, output_streams, bolt_parallelism): bolt = protoTopology.Bolt() bolt.comp.name = bolt_name kv = bolt.comp.config.kvs.add() kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE') kv.value = str(bolt_parallelism) for stream in input_streams: bolt.inputs.add().stream.CopyFrom(stream) for stream in output_streams: bolt.outputs.add().stream.CopyFrom(stream) return bolt def create_mock_simple_topology( self, spout_parallelism=1, bolt_parallelism=1): """ Simple topology contains one spout and one bolt. 
""" topology = protoTopology.Topology() topology.id = MockProto.topology_id topology.name = MockProto.topology_name # Stream1 stream1 = protoTopology.StreamId() stream1.id = "mock_stream1" stream1.component_name = "mock_spout" # Spout1 spout = self.create_mock_spout("mock_spout", [stream1], spout_parallelism) topology.spouts.extend([spout]) # Bolt1 bolt = self.create_mock_bolt("mock_bolt", [stream1], [], bolt_parallelism) topology.bolts.extend([bolt]) return topology def create_mock_medium_topology( self, spout_parallelism=1, bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1): """ Medium topology is a three stage topology with one spout, two mid stage bolts, and one last stage bolt. S -str1-> B1 -str3-> B3 S -str2-> B2 -str4-> B3 """ topology = protoTopology.Topology() topology.id = "mock_topology_id" topology.name = "mock_topology_name" # Streams stream1 = protoTopology.StreamId() stream1.id = "mock_stream1" stream1.component_name = "mock_spout1" stream2 = protoTopology.StreamId() stream2.id = "mock_stream2" stream2.component_name = "mock_spout1" stream3 = protoTopology.StreamId() stream3.id = "mock_stream3" stream3.component_name = "mock_bolt1" stream4 = protoTopology.StreamId() stream4.id = "mock_stream4" stream4.component_name = "mock_bolt2" # Spouts spout1 = self.create_mock_spout("mock_spout1", [stream1, stream2], spout_parallelism) topology.spouts.extend([spout1]) # Bolts bolt1 = self.create_mock_bolt("mock_bolt1", [stream1], [stream3], bolt1_parallelism) bolt2 = self.create_mock_bolt("mock_bolt2", [stream2], [stream4], bolt2_parallelism) bolt3 = self.create_mock_bolt("mock_bolt3", [stream3, stream4], [], bolt3_parallelism) topology.bolts.extend([bolt1, bolt2, bolt3]) return topology def create_mock_simple_physical_plan( self, spout_parallelism=1, bolt_parallelism=1): pplan = protoPPlan.PhysicalPlan() pplan.topology.CopyFrom(self.create_mock_simple_topology( spout_parallelism, bolt_parallelism)) return pplan def create_mock_medium_physical_plan( 
self, spout_parallelism=1, bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1): pplan = protoPPlan.PhysicalPlan() pplan.topology.CopyFrom(self.create_mock_medium_topology( spout_parallelism, bolt1_parallelism, bolt2_parallelism, bolt3_parallelism)) return pplan def create_mock_execution_state(self): estate = protoEState.ExecutionState() estate.topology_name = MockProto.topology_name estate.topology_id = MockProto.topology_id estate.cluster = MockProto.cluster estate.environ = MockProto.environ return estate def create_mock_tmaster(self): tmaster = protoTmaster.TMasterLocation() return tmaster def add_topology_config(self, topology, key, value): kv = topology.topology_config.kvs.add() kv.key = key kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE') kv.value = str(value)
normal
{ "blob_id": "002ef36bd132f1ac258b3f8baf8098accbd8a8f2", "index": 6839, "step-1": "<mask token>\n\n\nclass MockProto(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n 
S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n <mask 
token>\n", "step-2": "<mask token>\n\n\nclass MockProto(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n 
topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = 
topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n", "step-3": "<mask token>\n\n\nclass MockProto(object):\n \"\"\" Mocking Proto\"\"\"\n topology_name = 'mock_topology_name'\n topology_id = 'mock_topology_id'\n cluster = 'mock_topology_cluster'\n environ = 'mock_topology_environ'\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n 
bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n 
estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n", "step-4": "<mask token>\nfrom heron.common.src.python import constants\nimport heron.proto.execution_state_pb2 as protoEState\nimport heron.proto.physical_plan_pb2 as protoPPlan\nimport heron.proto.tmaster_pb2 as protoTmaster\nimport heron.proto.topology_pb2 as protoTopology\n\n\nclass MockProto(object):\n \"\"\" Mocking Proto\"\"\"\n topology_name = 'mock_topology_name'\n topology_id = 'mock_topology_id'\n cluster = 'mock_topology_cluster'\n environ = 'mock_topology_environ'\n\n def create_mock_spout(self, spout_name, output_streams, spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self, bolt_name, input_streams, output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(self, spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n 
topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout'\n spout = self.create_mock_spout('mock_spout', [stream1],\n spout_parallelism)\n topology.spouts.extend([spout])\n bolt = self.create_mock_bolt('mock_bolt', [stream1], [],\n bolt_parallelism)\n topology.bolts.extend([bolt])\n return topology\n\n def create_mock_medium_topology(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = 'mock_topology_id'\n topology.name = 'mock_topology_name'\n stream1 = protoTopology.StreamId()\n stream1.id = 'mock_stream1'\n stream1.component_name = 'mock_spout1'\n stream2 = protoTopology.StreamId()\n stream2.id = 'mock_stream2'\n stream2.component_name = 'mock_spout1'\n stream3 = protoTopology.StreamId()\n stream3.id = 'mock_stream3'\n stream3.component_name = 'mock_bolt1'\n stream4 = protoTopology.StreamId()\n stream4.id = 'mock_stream4'\n stream4.component_name = 'mock_bolt2'\n spout1 = self.create_mock_spout('mock_spout1', [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n bolt1 = self.create_mock_bolt('mock_bolt1', [stream1], [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt('mock_bolt2', [stream2], [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt('mock_bolt3', [stream3, stream4], [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n return topology\n\n def create_mock_simple_physical_plan(self, spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism, bolt_parallelism))\n return pplan\n\n def 
create_mock_medium_physical_plan(self, spout_parallelism=1,\n bolt1_parallelism=1, bolt2_parallelism=1, bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism, bolt1_parallelism, bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n", "step-5": "''' mock_proto.py '''\nfrom heron.common.src.python import constants\nimport heron.proto.execution_state_pb2 as protoEState\nimport heron.proto.physical_plan_pb2 as protoPPlan\nimport heron.proto.tmaster_pb2 as protoTmaster\nimport heron.proto.topology_pb2 as protoTopology\n\n# pylint: disable=no-self-use, missing-docstring\nclass MockProto(object):\n ''' Mocking Proto'''\n topology_name = \"mock_topology_name\"\n topology_id = \"mock_topology_id\"\n cluster = \"mock_topology_cluster\"\n environ = \"mock_topology_environ\"\n\n def create_mock_spout(self,\n spout_name,\n output_streams,\n spout_parallelism):\n spout = protoTopology.Spout()\n spout.comp.name = spout_name\n kv = spout.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(spout_parallelism)\n for stream in output_streams:\n spout.outputs.add().stream.CopyFrom(stream)\n return spout\n\n def create_mock_bolt(self,\n bolt_name,\n input_streams,\n output_streams,\n bolt_parallelism):\n bolt = protoTopology.Bolt()\n 
bolt.comp.name = bolt_name\n kv = bolt.comp.config.kvs.add()\n kv.key = constants.TOPOLOGY_COMPONENT_PARALLELISM\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(bolt_parallelism)\n for stream in input_streams:\n bolt.inputs.add().stream.CopyFrom(stream)\n for stream in output_streams:\n bolt.outputs.add().stream.CopyFrom(stream)\n return bolt\n\n def create_mock_simple_topology(\n self,\n spout_parallelism=1,\n bolt_parallelism=1):\n \"\"\"\n Simple topology contains one spout and one bolt.\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = MockProto.topology_id\n topology.name = MockProto.topology_name\n\n # Stream1\n stream1 = protoTopology.StreamId()\n stream1.id = \"mock_stream1\"\n stream1.component_name = \"mock_spout\"\n\n # Spout1\n spout = self.create_mock_spout(\"mock_spout\", [stream1], spout_parallelism)\n topology.spouts.extend([spout])\n\n # Bolt1\n bolt = self.create_mock_bolt(\"mock_bolt\", [stream1], [], bolt_parallelism)\n topology.bolts.extend([bolt])\n\n return topology\n\n def create_mock_medium_topology(\n self,\n spout_parallelism=1,\n bolt1_parallelism=1,\n bolt2_parallelism=1,\n bolt3_parallelism=1):\n \"\"\"\n Medium topology is a three stage topology\n with one spout, two mid stage bolts, and one\n last stage bolt.\n S -str1-> B1 -str3-> B3\n S -str2-> B2 -str4-> B3\n \"\"\"\n topology = protoTopology.Topology()\n topology.id = \"mock_topology_id\"\n topology.name = \"mock_topology_name\"\n\n # Streams\n stream1 = protoTopology.StreamId()\n stream1.id = \"mock_stream1\"\n stream1.component_name = \"mock_spout1\"\n\n stream2 = protoTopology.StreamId()\n stream2.id = \"mock_stream2\"\n stream2.component_name = \"mock_spout1\"\n\n stream3 = protoTopology.StreamId()\n stream3.id = \"mock_stream3\"\n stream3.component_name = \"mock_bolt1\"\n\n stream4 = protoTopology.StreamId()\n stream4.id = \"mock_stream4\"\n stream4.component_name = \"mock_bolt2\"\n\n # Spouts\n spout1 = 
self.create_mock_spout(\"mock_spout1\",\n [stream1, stream2],\n spout_parallelism)\n topology.spouts.extend([spout1])\n\n # Bolts\n bolt1 = self.create_mock_bolt(\"mock_bolt1\",\n [stream1],\n [stream3],\n bolt1_parallelism)\n bolt2 = self.create_mock_bolt(\"mock_bolt2\",\n [stream2],\n [stream4],\n bolt2_parallelism)\n bolt3 = self.create_mock_bolt(\"mock_bolt3\",\n [stream3, stream4],\n [],\n bolt3_parallelism)\n topology.bolts.extend([bolt1, bolt2, bolt3])\n\n\n return topology\n\n def create_mock_simple_physical_plan(\n self,\n spout_parallelism=1,\n bolt_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_simple_topology(\n spout_parallelism,\n bolt_parallelism))\n return pplan\n\n def create_mock_medium_physical_plan(\n self,\n spout_parallelism=1,\n bolt1_parallelism=1,\n bolt2_parallelism=1,\n bolt3_parallelism=1):\n pplan = protoPPlan.PhysicalPlan()\n pplan.topology.CopyFrom(self.create_mock_medium_topology(\n spout_parallelism,\n bolt1_parallelism,\n bolt2_parallelism,\n bolt3_parallelism))\n return pplan\n\n def create_mock_execution_state(self):\n estate = protoEState.ExecutionState()\n estate.topology_name = MockProto.topology_name\n estate.topology_id = MockProto.topology_id\n estate.cluster = MockProto.cluster\n estate.environ = MockProto.environ\n return estate\n\n def create_mock_tmaster(self):\n tmaster = protoTmaster.TMasterLocation()\n return tmaster\n\n def add_topology_config(self, topology, key, value):\n kv = topology.topology_config.kvs.add()\n kv.key = key\n kv.type = protoTopology.ConfigValueType.Value('STRING_VALUE')\n kv.value = str(value)\n", "step-ids": [ 9, 10, 12, 13, 14 ] }
[ 9, 10, 12, 13, 14 ]
def calculaEuclidiana(obj1,obj2): soma = 0 for I in range(len(obj1)): soma += (obj1[I] - obj2[I])**2 return soma ** 0.5 def calculaMinkowski(obj1,obj2,p): # p = 2 => distancia Euclidiana # p = 1 => distancia de Manhattan soma = 0 for I in range(len(obj1)): soma += (abs(obj1[I] - obj2[I]))**p return soma ** (1/p) def delta(obj1,obj2): if((obj1 == None or obj2 == None) or (obj1 == 'None' or obj2 == 'None')): return 0 else: return 1 def calculaMinkowskiNormalizada(obj1,obj2,p): soma = 0 somaDelta = 0 for I in range(len(obj1)): if(delta(obj1[I],obj2[I])): somaDelta+=1 soma += (abs(obj1[I] - obj2[I])) ** p return (soma ** (1/p))/somaDelta # def calculaMahalanobis() obj1 = {} obj1[0] = 2 obj1[1] = -1 obj1[2] = None obj1[3] = 0 # print("len ",obj1[2]) obj2 = {} obj2[0] = 7 obj2[1] = 0 obj2[2] = -4 obj2[3] = 8 # print("Result Euclidiana = ",calculaEuclidiana(obj1,obj2)) # print("Result Minkowski = ", calculaMinkowski(obj1,obj2,2)) # print("Result Minkowski normalizada = ", calculaMinkowskiNormalizada(obj1,obj2,2))
normal
{ "blob_id": "6c349b7b4d82b37ec1b1ff8e0d35a3557ed1af67", "index": 4613, "step-1": "<mask token>\n\n\ndef calculaMinkowski(obj1, obj2, p):\n soma = 0\n for I in range(len(obj1)):\n soma += abs(obj1[I] - obj2[I]) ** p\n return soma ** (1 / p)\n\n\n<mask token>\n\n\ndef calculaMinkowskiNormalizada(obj1, obj2, p):\n soma = 0\n somaDelta = 0\n for I in range(len(obj1)):\n if delta(obj1[I], obj2[I]):\n somaDelta += 1\n soma += abs(obj1[I] - obj2[I]) ** p\n return soma ** (1 / p) / somaDelta\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef calculaMinkowski(obj1, obj2, p):\n soma = 0\n for I in range(len(obj1)):\n soma += abs(obj1[I] - obj2[I]) ** p\n return soma ** (1 / p)\n\n\ndef delta(obj1, obj2):\n if (obj1 == None or obj2 == None) or (obj1 == 'None' or obj2 == 'None'):\n return 0\n else:\n return 1\n\n\ndef calculaMinkowskiNormalizada(obj1, obj2, p):\n soma = 0\n somaDelta = 0\n for I in range(len(obj1)):\n if delta(obj1[I], obj2[I]):\n somaDelta += 1\n soma += abs(obj1[I] - obj2[I]) ** p\n return soma ** (1 / p) / somaDelta\n\n\n<mask token>\n", "step-3": "def calculaEuclidiana(obj1, obj2):\n soma = 0\n for I in range(len(obj1)):\n soma += (obj1[I] - obj2[I]) ** 2\n return soma ** 0.5\n\n\ndef calculaMinkowski(obj1, obj2, p):\n soma = 0\n for I in range(len(obj1)):\n soma += abs(obj1[I] - obj2[I]) ** p\n return soma ** (1 / p)\n\n\ndef delta(obj1, obj2):\n if (obj1 == None or obj2 == None) or (obj1 == 'None' or obj2 == 'None'):\n return 0\n else:\n return 1\n\n\ndef calculaMinkowskiNormalizada(obj1, obj2, p):\n soma = 0\n somaDelta = 0\n for I in range(len(obj1)):\n if delta(obj1[I], obj2[I]):\n somaDelta += 1\n soma += abs(obj1[I] - obj2[I]) ** p\n return soma ** (1 / p) / somaDelta\n\n\n<mask token>\n", "step-4": "def calculaEuclidiana(obj1, obj2):\n soma = 0\n for I in range(len(obj1)):\n soma += (obj1[I] - obj2[I]) ** 2\n return soma ** 0.5\n\n\ndef calculaMinkowski(obj1, obj2, p):\n soma = 0\n for I in range(len(obj1)):\n soma += abs(obj1[I] - obj2[I]) ** 
p\n return soma ** (1 / p)\n\n\ndef delta(obj1, obj2):\n if (obj1 == None or obj2 == None) or (obj1 == 'None' or obj2 == 'None'):\n return 0\n else:\n return 1\n\n\ndef calculaMinkowskiNormalizada(obj1, obj2, p):\n soma = 0\n somaDelta = 0\n for I in range(len(obj1)):\n if delta(obj1[I], obj2[I]):\n somaDelta += 1\n soma += abs(obj1[I] - obj2[I]) ** p\n return soma ** (1 / p) / somaDelta\n\n\nobj1 = {}\nobj1[0] = 2\nobj1[1] = -1\nobj1[2] = None\nobj1[3] = 0\nobj2 = {}\nobj2[0] = 7\nobj2[1] = 0\nobj2[2] = -4\nobj2[3] = 8\n", "step-5": "def calculaEuclidiana(obj1,obj2):\n soma = 0\n for I in range(len(obj1)):\n soma += (obj1[I] - obj2[I])**2\n return soma ** 0.5\n\ndef calculaMinkowski(obj1,obj2,p):\n # p = 2 => distancia Euclidiana\n # p = 1 => distancia de Manhattan\n soma = 0\n for I in range(len(obj1)):\n soma += (abs(obj1[I] - obj2[I]))**p\n return soma ** (1/p) \n\ndef delta(obj1,obj2):\n if((obj1 == None or obj2 == None) or (obj1 == 'None' or obj2 == 'None')):\n return 0\n else:\n return 1\n\ndef calculaMinkowskiNormalizada(obj1,obj2,p):\n soma = 0\n somaDelta = 0\n for I in range(len(obj1)):\n if(delta(obj1[I],obj2[I])):\n somaDelta+=1\n soma += (abs(obj1[I] - obj2[I])) ** p\n return (soma ** (1/p))/somaDelta\n\n# def calculaMahalanobis()\n\nobj1 = {}\nobj1[0] = 2\nobj1[1] = -1\nobj1[2] = None\nobj1[3] = 0\n\n# print(\"len \",obj1[2])\n\nobj2 = {}\nobj2[0] = 7\nobj2[1] = 0\nobj2[2] = -4\nobj2[3] = 8\n\n# print(\"Result Euclidiana = \",calculaEuclidiana(obj1,obj2))\n\n# print(\"Result Minkowski = \", calculaMinkowski(obj1,obj2,2))\n\n# print(\"Result Minkowski normalizada = \", calculaMinkowskiNormalizada(obj1,obj2,2))", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import turtle pen = turtle.Turtle() def curve(): for i in range(200): pen.right(1) pen.forward(1) def heart(): pen.fillcolor('yellow') pen.begin_fill() pen.left(140) pen.forward(113) curve() pen.left(120) curve() pen.forward(112) pen.end_fill() heart()
normal
{ "blob_id": "fa925d0ef4f9df3fdf9a51c7fcc88933609bc9e3", "index": 3980, "step-1": "<mask token>\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n", "step-3": "<mask token>\npen = turtle.Turtle()\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n", "step-4": "import turtle\npen = turtle.Turtle()\n\n\ndef curve():\n for i in range(200):\n pen.right(1)\n pen.forward(1)\n\n\ndef heart():\n pen.fillcolor('yellow')\n pen.begin_fill()\n pen.left(140)\n pen.forward(113)\n curve()\n pen.left(120)\n curve()\n pen.forward(112)\n pen.end_fill()\n\n\nheart()\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
class TrieNode: def __init__(self): self.children = [None for i in range(26)] self.isEndOfWord = 0 class Trie: def __init__(self): self.root = self.getNode() def getNode(self): return TrieNode() def insert(self, key): root = self.root length = len(key) for level in range(length): index = ord(key[level])-ord('a') if root.children[index]==None: root.children[index] = self.getNode() root = root.children[index] root.isEndOfWord = 1 def search(self, key): root = self.root for level,c in enumerate(key): if root.children[ord(c)-ord('a')]==None: return False root = root.children[ord(c)-ord('a')] return root!=None and root.isEndOfWord==1 keys = ["the","a","there","anaswe","any", "by","their"] output = ["Not present in trie", "Present in tire"] # Trie object t = Trie() # Construct trie for key in keys: print 'inserting key, ', key t.insert(key) print("{} ---- {}".format("the",output[t.search("the")])) print("{} ---- {}".format("these",output[t.search("these")]))
normal
{ "blob_id": "5c7c90717f2e98c26675fec6390b4ea9797d6a4e", "index": 2240, "step-1": "class TrieNode:\n\tdef __init__(self):\n\t\tself.children = [None for i in range(26)]\n\t\tself.isEndOfWord = 0\nclass Trie:\n\tdef __init__(self):\n\t\tself.root = self.getNode()\n\tdef getNode(self):\n\t\treturn TrieNode()\n\tdef insert(self, key):\n\t\troot = self.root\n\t\tlength = len(key)\n\t\tfor level in range(length):\n\t\t\tindex = ord(key[level])-ord('a')\n\t\t\tif root.children[index]==None:\n\t\t\t\troot.children[index] = self.getNode()\n\t\t\troot = root.children[index]\n\t\troot.isEndOfWord = 1\n\tdef search(self, key):\n\t\troot = self.root\n\t\tfor level,c in enumerate(key):\n\t\t\t if root.children[ord(c)-ord('a')]==None:\n\t\t\t\treturn False\n\t\t\t root = root.children[ord(c)-ord('a')]\n\t\treturn root!=None and root.isEndOfWord==1\nkeys = [\"the\",\"a\",\"there\",\"anaswe\",\"any\", \"by\",\"their\"] \noutput = [\"Not present in trie\", \"Present in tire\"] \n \n# Trie object \nt = Trie() \n \n# Construct trie \nfor key in keys: \n print 'inserting key, ', key\n t.insert(key) \t\nprint(\"{} ---- {}\".format(\"the\",output[t.search(\"the\")])) \t\t\nprint(\"{} ---- {}\".format(\"these\",output[t.search(\"these\")]))\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
ii = [('CookGHP3.py', 1), ('AubePRP2.py', 1), ('WilkJMC3.py', 1), ( 'LeakWTI3.py', 1), ('AubePRP.py', 2), ('GellWPT.py', 2), ('AdamWEP.py', 1), ('KiddJAE.py', 1), ('CoolWHM.py', 1), ('WadeJEB.py', 1), ( 'SoutRD.py', 2), ('WheeJPT.py', 1), ('HowiWRL2.py', 1), ('WilkJMC.py', 1), ('WestJIT.py', 1), ('DequTKM.py', 2), ('StorJCC.py', 1), ( 'DibdTRL.py', 1), ('TaylIF.py', 1), ('ThomWEC.py', 1)]
normal
{ "blob_id": "dce496c9ae6605e95ffbbb2885ec15b19fb756ef", "index": 2799, "step-1": "<mask token>\n", "step-2": "ii = [('CookGHP3.py', 1), ('AubePRP2.py', 1), ('WilkJMC3.py', 1), (\n 'LeakWTI3.py', 1), ('AubePRP.py', 2), ('GellWPT.py', 2), ('AdamWEP.py',\n 1), ('KiddJAE.py', 1), ('CoolWHM.py', 1), ('WadeJEB.py', 1), (\n 'SoutRD.py', 2), ('WheeJPT.py', 1), ('HowiWRL2.py', 1), ('WilkJMC.py', \n 1), ('WestJIT.py', 1), ('DequTKM.py', 2), ('StorJCC.py', 1), (\n 'DibdTRL.py', 1), ('TaylIF.py', 1), ('ThomWEC.py', 1)]\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
#inject shellcode from pwn import * shellcode =p32(0x8049000+0x4)\ +asm("mov eax,SYS_execve")\ +asm("xor ecx,ecx")\ +asm("xor edx,edx")\ +asm("mov ebx,0x8049014")\ +asm("int 0x80")\ +"/bin/sh" r=process("./stack0",aslr=True) r.sendline('A'*(0x4c)+p32(0x8049000-0x4)+p32(0x804840c)+p32(0x8049000)) r.sendline(shellcode) r.interactive()
normal
{ "blob_id": "cf70d6064fd4a43bc17cd852aaf04afade73d995", "index": 9252, "step-1": "<mask token>\n", "step-2": "<mask token>\nr.sendline('A' * 76 + p32(134516736 - 4) + p32(134513676) + p32(134516736))\nr.sendline(shellcode)\nr.interactive()\n", "step-3": "<mask token>\nshellcode = p32(134516736 + 4) + asm('mov eax,SYS_execve') + asm('xor ecx,ecx'\n ) + asm('xor edx,edx') + asm('mov ebx,0x8049014') + asm('int 0x80'\n ) + '/bin/sh'\nr = process('./stack0', aslr=True)\nr.sendline('A' * 76 + p32(134516736 - 4) + p32(134513676) + p32(134516736))\nr.sendline(shellcode)\nr.interactive()\n", "step-4": "from pwn import *\nshellcode = p32(134516736 + 4) + asm('mov eax,SYS_execve') + asm('xor ecx,ecx'\n ) + asm('xor edx,edx') + asm('mov ebx,0x8049014') + asm('int 0x80'\n ) + '/bin/sh'\nr = process('./stack0', aslr=True)\nr.sendline('A' * 76 + p32(134516736 - 4) + p32(134513676) + p32(134516736))\nr.sendline(shellcode)\nr.interactive()\n", "step-5": "#inject shellcode\nfrom pwn import *\n\n\nshellcode =p32(0x8049000+0x4)\\\n+asm(\"mov eax,SYS_execve\")\\\n+asm(\"xor ecx,ecx\")\\\n+asm(\"xor edx,edx\")\\\n+asm(\"mov ebx,0x8049014\")\\\n+asm(\"int 0x80\")\\\n+\"/bin/sh\"\nr=process(\"./stack0\",aslr=True)\nr.sendline('A'*(0x4c)+p32(0x8049000-0x4)+p32(0x804840c)+p32(0x8049000))\nr.sendline(shellcode)\nr.interactive()\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
''' # AWS::Chatbot Construct Library AWS Chatbot is an AWS service that enables DevOps and software development teams to use Slack chat rooms to monitor and respond to operational events in their AWS Cloud. AWS Chatbot processes AWS service notifications from Amazon Simple Notification Service (Amazon SNS), and forwards them to Slack chat rooms so teams can analyze and act on them immediately, regardless of location. This module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project. ```python import aws_cdk.aws_chatbot as chatbot import aws_cdk.aws_sns as sns import aws_cdk.aws_iam as iam slack_channel = chatbot.SlackChannelConfiguration(self, "MySlackChannel", slack_channel_configuration_name="YOUR_CHANNEL_NAME", slack_workspace_id="YOUR_SLACK_WORKSPACE_ID", slack_channel_id="YOUR_SLACK_CHANNEL_ID" ) slack_channel.add_to_role_policy(iam.PolicyStatement( effect=iam.Effect.ALLOW, actions=["s3:GetObject" ], resources=["arn:aws:s3:::abc/xyz/123.txt"] )) slack_channel.add_notification_topic(sns.Topic(self, "MyTopic")) ``` ## Log Group Slack channel configuration automatically create a log group with the name `/aws/chatbot/<configuration-name>` in `us-east-1` upon first execution with log data set to never expire. The `logRetention` property can be used to set a different expiration period. A log group will be created if not already exists. If the log group already exists, it's expiration will be configured to the value specified in this construct (never expire, by default). By default, CDK uses the AWS SDK retry options when interacting with the log group. The `logRetentionRetryOptions` property allows you to customize the maximum number of retries and base backoff duration. 
*Note* that, if `logRetention` is set, a [CloudFormation custom resource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added to the stack that pre-creates the log group as part of the stack deployment, if it already doesn't exist, and sets the correct log retention period (never expire, by default). ''' import abc import builtins import datetime import enum import typing import jsii import publication import typing_extensions from typeguard import check_type from .._jsii import * import constructs from .. import ( CfnResource as _CfnResource_9df397a6, Duration as _Duration_4839e8c3, IInspectable as _IInspectable_c2943556, IResolvable as _IResolvable_da3f097b, IResource as _IResource_c80c4260, Resource as _Resource_45bc6135, TreeInspector as _TreeInspector_488e0dd5, ) from ..aws_cloudwatch import ( Metric as _Metric_e396a4dc, MetricOptions as _MetricOptions_1788b62f, Unit as _Unit_61bc6f70, ) from ..aws_codestarnotifications import ( INotificationRuleTarget as _INotificationRuleTarget_faa3b79b, NotificationRuleTargetConfig as _NotificationRuleTargetConfig_ea27e095, ) from ..aws_iam import ( IGrantable as _IGrantable_71c4f5de, IPrincipal as _IPrincipal_539bb2fd, IRole as _IRole_235f5d8e, PolicyStatement as _PolicyStatement_0fe33853, ) from ..aws_logs import ( LogRetentionRetryOptions as _LogRetentionRetryOptions_62d80a14, RetentionDays as _RetentionDays_070f99f0, ) from ..aws_sns import ITopic as _ITopic_9eca4852 @jsii.implements(_IInspectable_c2943556) class CfnSlackChannelConfiguration( _CfnResource_9df397a6, metaclass=jsii.JSIIMeta, jsii_type="aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration", ): '''A CloudFormation ``AWS::Chatbot::SlackChannelConfiguration``. The ``AWS::Chatbot::SlackChannelConfiguration`` resource configures a Slack channel to allow users to use AWS Chatbot with AWS CloudFormation templates. This resource requires some setup to be done in the AWS Chatbot console. 
To provide the required Slack workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console, then copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* . :cloudformationResource: AWS::Chatbot::SlackChannelConfiguration :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html :exampleMetadata: fixture=_generated Example:: # The code below shows an example of how to instantiate this type. # The values are placeholders you should change. from aws_cdk import aws_chatbot as chatbot cfn_slack_channel_configuration = chatbot.CfnSlackChannelConfiguration(self, "MyCfnSlackChannelConfiguration", configuration_name="configurationName", iam_role_arn="iamRoleArn", slack_channel_id="slackChannelId", slack_workspace_id="slackWorkspaceId", # the properties below are optional guardrail_policies=["guardrailPolicies"], logging_level="loggingLevel", sns_topic_arns=["snsTopicArns"], user_role_required=False ) ''' def __init__( self, scope: constructs.Construct, id: builtins.str, *, configuration_name: builtins.str, iam_role_arn: builtins.str, slack_channel_id: builtins.str, slack_workspace_id: builtins.str, guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None, logging_level: typing.Optional[builtins.str] = None, sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None, user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None, ) -> None: '''Create a new ``AWS::Chatbot::SlackChannelConfiguration``. :param scope: - scope in which this resource is defined. :param id: - scoped id of the resource. :param configuration_name: The name of the configuration. 
:param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ . :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` . :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* . :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set. :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` . :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot . :param user_role_required: Enables use of a user role requirement in your chat configuration. 
''' if __debug__: type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.__init__) check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"]) check_type(argname="argument id", value=id, expected_type=type_hints["id"]) props = CfnSlackChannelConfigurationProps( configuration_name=configuration_name, iam_role_arn=iam_role_arn, slack_channel_id=slack_channel_id, slack_workspace_id=slack_workspace_id, guardrail_policies=guardrail_policies, logging_level=logging_level, sns_topic_arns=sns_topic_arns, user_role_required=user_role_required, ) jsii.create(self.__class__, self, [scope, id, props]) @jsii.member(jsii_name="inspect") def inspect(self, inspector: _TreeInspector_488e0dd5) -> None: '''Examines the CloudFormation resource and discloses attributes. :param inspector: - tree inspector to collect and process attributes. ''' if __debug__: type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.inspect) check_type(argname="argument inspector", value=inspector, expected_type=type_hints["inspector"]) return typing.cast(None, jsii.invoke(self, "inspect", [inspector])) @jsii.member(jsii_name="renderProperties") def _render_properties( self, props: typing.Mapping[builtins.str, typing.Any], ) -> typing.Mapping[builtins.str, typing.Any]: ''' :param props: - ''' if __debug__: type_hints = typing.get_type_hints(CfnSlackChannelConfiguration._render_properties) check_type(argname="argument props", value=props, expected_type=type_hints["props"]) return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props])) @jsii.python.classproperty # type: ignore[misc] @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME") def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str: '''The CloudFormation resource type name for this resource class.''' return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="attrArn") def attr_arn(self) -> 
builtins.str: ''' :cloudformationAttribute: Arn ''' return typing.cast(builtins.str, jsii.get(self, "attrArn")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="cfnProperties") def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]: return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="configurationName") def configuration_name(self) -> builtins.str: '''The name of the configuration. :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname ''' return typing.cast(builtins.str, jsii.get(self, "configurationName")) @configuration_name.setter def configuration_name(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "configuration_name").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "configurationName", value) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="iamRoleArn") def iam_role_arn(self) -> builtins.str: '''The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ . 
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn ''' return typing.cast(builtins.str, jsii.get(self, "iamRoleArn")) @iam_role_arn.setter def iam_role_arn(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "iam_role_arn").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "iamRoleArn", value) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackChannelId") def slack_channel_id(self) -> builtins.str: '''The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid ''' return typing.cast(builtins.str, jsii.get(self, "slackChannelId")) @slack_channel_id.setter def slack_channel_id(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "slack_channel_id").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "slackChannelId", value) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackWorkspaceId") def slack_workspace_id(self) -> builtins.str: '''The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid ''' return typing.cast(builtins.str, jsii.get(self, "slackWorkspaceId")) @slack_workspace_id.setter def slack_workspace_id(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "slack_workspace_id").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "slackWorkspaceId", value) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="guardrailPolicies") def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]: '''The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set. :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies ''' return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, "guardrailPolicies")) @guardrail_policies.setter def guardrail_policies( self, value: typing.Optional[typing.List[builtins.str]], ) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "guardrail_policies").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "guardrailPolicies", value) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="loggingLevel") def logging_level(self) -> typing.Optional[builtins.str]: '''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. 
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel ''' return typing.cast(typing.Optional[builtins.str], jsii.get(self, "loggingLevel")) @logging_level.setter def logging_level(self, value: typing.Optional[builtins.str]) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "logging_level").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "loggingLevel", value) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="snsTopicArns") def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]: '''The ARNs of the SNS topics that deliver notifications to AWS Chatbot . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns ''' return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, "snsTopicArns")) @sns_topic_arns.setter def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "sns_topic_arns").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "snsTopicArns", value) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="userRoleRequired") def user_role_required( self, ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]: '''Enables use of a user role requirement in your chat configuration. 
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired ''' return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], jsii.get(self, "userRoleRequired")) @user_role_required.setter def user_role_required( self, value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], ) -> None: if __debug__: type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, "user_role_required").fset) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "userRoleRequired", value) @jsii.data_type( jsii_type="aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps", jsii_struct_bases=[], name_mapping={ "configuration_name": "configurationName", "iam_role_arn": "iamRoleArn", "slack_channel_id": "slackChannelId", "slack_workspace_id": "slackWorkspaceId", "guardrail_policies": "guardrailPolicies", "logging_level": "loggingLevel", "sns_topic_arns": "snsTopicArns", "user_role_required": "userRoleRequired", }, ) class CfnSlackChannelConfigurationProps: def __init__( self, *, configuration_name: builtins.str, iam_role_arn: builtins.str, slack_channel_id: builtins.str, slack_workspace_id: builtins.str, guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None, logging_level: typing.Optional[builtins.str] = None, sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None, user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None, ) -> None: '''Properties for defining a ``CfnSlackChannelConfiguration``. :param configuration_name: The name of the configuration. :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ . :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` . :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* . :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set. :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` . :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot . :param user_role_required: Enables use of a user role requirement in your chat configuration. :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html :exampleMetadata: fixture=_generated Example:: # The code below shows an example of how to instantiate this type. # The values are placeholders you should change. 
from aws_cdk import aws_chatbot as chatbot cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps( configuration_name="configurationName", iam_role_arn="iamRoleArn", slack_channel_id="slackChannelId", slack_workspace_id="slackWorkspaceId", # the properties below are optional guardrail_policies=["guardrailPolicies"], logging_level="loggingLevel", sns_topic_arns=["snsTopicArns"], user_role_required=False ) ''' if __debug__: type_hints = typing.get_type_hints(CfnSlackChannelConfigurationProps.__init__) check_type(argname="argument configuration_name", value=configuration_name, expected_type=type_hints["configuration_name"]) check_type(argname="argument iam_role_arn", value=iam_role_arn, expected_type=type_hints["iam_role_arn"]) check_type(argname="argument slack_channel_id", value=slack_channel_id, expected_type=type_hints["slack_channel_id"]) check_type(argname="argument slack_workspace_id", value=slack_workspace_id, expected_type=type_hints["slack_workspace_id"]) check_type(argname="argument guardrail_policies", value=guardrail_policies, expected_type=type_hints["guardrail_policies"]) check_type(argname="argument logging_level", value=logging_level, expected_type=type_hints["logging_level"]) check_type(argname="argument sns_topic_arns", value=sns_topic_arns, expected_type=type_hints["sns_topic_arns"]) check_type(argname="argument user_role_required", value=user_role_required, expected_type=type_hints["user_role_required"]) self._values: typing.Dict[str, typing.Any] = { "configuration_name": configuration_name, "iam_role_arn": iam_role_arn, "slack_channel_id": slack_channel_id, "slack_workspace_id": slack_workspace_id, } if guardrail_policies is not None: self._values["guardrail_policies"] = guardrail_policies if logging_level is not None: self._values["logging_level"] = logging_level if sns_topic_arns is not None: self._values["sns_topic_arns"] = sns_topic_arns if user_role_required is not None: self._values["user_role_required"] = 
user_role_required @builtins.property def configuration_name(self) -> builtins.str: '''The name of the configuration. :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname ''' result = self._values.get("configuration_name") assert result is not None, "Required property 'configuration_name' is missing" return typing.cast(builtins.str, result) @builtins.property def iam_role_arn(self) -> builtins.str: '''The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn ''' result = self._values.get("iam_role_arn") assert result is not None, "Required property 'iam_role_arn' is missing" return typing.cast(builtins.str, result) @builtins.property def slack_channel_id(self) -> builtins.str: '''The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid ''' result = self._values.get("slack_channel_id") assert result is not None, "Required property 'slack_channel_id' is missing" return typing.cast(builtins.str, result) @builtins.property def slack_workspace_id(self) -> builtins.str: '''The ID of the Slack workspace authorized with AWS Chatbot . 
To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid ''' result = self._values.get("slack_workspace_id") assert result is not None, "Required property 'slack_workspace_id' is missing" return typing.cast(builtins.str, result) @builtins.property def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]: '''The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set. :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies ''' result = self._values.get("guardrail_policies") return typing.cast(typing.Optional[typing.List[builtins.str]], result) @builtins.property def logging_level(self) -> typing.Optional[builtins.str]: '''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` . :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel ''' result = self._values.get("logging_level") return typing.cast(typing.Optional[builtins.str], result) @builtins.property def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]: '''The ARNs of the SNS topics that deliver notifications to AWS Chatbot . 
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns ''' result = self._values.get("sns_topic_arns") return typing.cast(typing.Optional[typing.List[builtins.str]], result) @builtins.property def user_role_required( self, ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]: '''Enables use of a user role requirement in your chat configuration. :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired ''' result = self._values.get("user_role_required") return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], result) def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "CfnSlackChannelConfigurationProps(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) @jsii.interface(jsii_type="aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration") class ISlackChannelConfiguration( _IResource_c80c4260, _IGrantable_71c4f5de, _INotificationRuleTarget_faa3b79b, typing_extensions.Protocol, ): '''Represents a Slack channel configuration.''' @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackChannelConfigurationArn") def slack_channel_configuration_arn(self) -> builtins.str: '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}. :attribute: true ''' ... @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackChannelConfigurationName") def slack_channel_configuration_name(self) -> builtins.str: '''The name of Slack channel configuration. :attribute: true ''' ... 
@builtins.property # type: ignore[misc] @jsii.member(jsii_name="role") def role(self) -> typing.Optional[_IRole_235f5d8e]: '''The permission role of Slack channel configuration. :default: - A role will be created. :attribute: true ''' ... @jsii.member(jsii_name="addToRolePolicy") def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None: '''Adds a statement to the IAM role. :param statement: - ''' ... @jsii.member(jsii_name="metric") def metric( self, metric_name: builtins.str, *, account: typing.Optional[builtins.str] = None, color: typing.Optional[builtins.str] = None, dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None, label: typing.Optional[builtins.str] = None, period: typing.Optional[_Duration_4839e8c3] = None, region: typing.Optional[builtins.str] = None, statistic: typing.Optional[builtins.str] = None, unit: typing.Optional[_Unit_61bc6f70] = None, ) -> _Metric_e396a4dc: '''Return the given named metric for this SlackChannelConfiguration. :param metric_name: - :param account: Account which this metric comes from. Default: - Deployment account. :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color :param dimensions_map: Dimensions of the metric. Default: - No dimensions. :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label :param period: The period over which the specified statistic is applied. 
Default: Duration.minutes(5) :param region: Region which this metric comes from. Default: - Deployment region. :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream ''' ... class _ISlackChannelConfigurationProxy( jsii.proxy_for(_IResource_c80c4260), # type: ignore[misc] jsii.proxy_for(_IGrantable_71c4f5de), # type: ignore[misc] jsii.proxy_for(_INotificationRuleTarget_faa3b79b), # type: ignore[misc] ): '''Represents a Slack channel configuration.''' __jsii_type__: typing.ClassVar[str] = "aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration" @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackChannelConfigurationArn") def slack_channel_configuration_arn(self) -> builtins.str: '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}. :attribute: true ''' return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationArn")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackChannelConfigurationName") def slack_channel_configuration_name(self) -> builtins.str: '''The name of Slack channel configuration. 
:attribute: true ''' return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationName")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="role") def role(self) -> typing.Optional[_IRole_235f5d8e]: '''The permission role of Slack channel configuration. :default: - A role will be created. :attribute: true ''' return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, "role")) @jsii.member(jsii_name="addToRolePolicy") def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None: '''Adds a statement to the IAM role. :param statement: - ''' if __debug__: type_hints = typing.get_type_hints(ISlackChannelConfiguration.add_to_role_policy) check_type(argname="argument statement", value=statement, expected_type=type_hints["statement"]) return typing.cast(None, jsii.invoke(self, "addToRolePolicy", [statement])) @jsii.member(jsii_name="metric") def metric( self, metric_name: builtins.str, *, account: typing.Optional[builtins.str] = None, color: typing.Optional[builtins.str] = None, dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None, label: typing.Optional[builtins.str] = None, period: typing.Optional[_Duration_4839e8c3] = None, region: typing.Optional[builtins.str] = None, statistic: typing.Optional[builtins.str] = None, unit: typing.Optional[_Unit_61bc6f70] = None, ) -> _Metric_e396a4dc: '''Return the given named metric for this SlackChannelConfiguration. :param metric_name: - :param account: Account which this metric comes from. Default: - Deployment account. :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color :param dimensions_map: Dimensions of the metric. Default: - No dimensions. :param label: Label for this metric when added to a Graph in a Dashboard. 
You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5) :param region: Region which this metric comes from. Default: - Deployment region. :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream ''' if __debug__: type_hints = typing.get_type_hints(ISlackChannelConfiguration.metric) check_type(argname="argument metric_name", value=metric_name, expected_type=type_hints["metric_name"]) props = _MetricOptions_1788b62f( account=account, color=color, dimensions_map=dimensions_map, label=label, period=period, region=region, statistic=statistic, unit=unit, ) return typing.cast(_Metric_e396a4dc, jsii.invoke(self, "metric", [metric_name, props])) # Adding a "__jsii_proxy_class__(): typing.Type" function to the interface typing.cast(typing.Any, ISlackChannelConfiguration).__jsii_proxy_class__ = lambda : _ISlackChannelConfigurationProxy @jsii.enum(jsii_type="aws-cdk-lib.aws_chatbot.LoggingLevel") class LoggingLevel(enum.Enum): '''Logging levels include ERROR, INFO, or NONE.''' ERROR = "ERROR" '''ERROR.''' INFO = "INFO" '''INFO.''' NONE = "NONE" '''NONE.''' @jsii.implements(ISlackChannelConfiguration) class SlackChannelConfiguration( _Resource_45bc6135, metaclass=jsii.JSIIMeta, jsii_type="aws-cdk-lib.aws_chatbot.SlackChannelConfiguration", ): '''A new Slack channel configuration. 
:exampleMetadata: infused Example:: import aws_cdk.aws_chatbot as chatbot # project: codebuild.Project target = chatbot.SlackChannelConfiguration(self, "MySlackChannel", slack_channel_configuration_name="YOUR_CHANNEL_NAME", slack_workspace_id="YOUR_SLACK_WORKSPACE_ID", slack_channel_id="YOUR_SLACK_CHANNEL_ID" ) rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target) ''' def __init__( self, scope: constructs.Construct, id: builtins.str, *, slack_channel_configuration_name: builtins.str, slack_channel_id: builtins.str, slack_workspace_id: builtins.str, logging_level: typing.Optional[LoggingLevel] = None, log_retention: typing.Optional[_RetentionDays_070f99f0] = None, log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None, log_retention_role: typing.Optional[_IRole_235f5d8e] = None, notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None, role: typing.Optional[_IRole_235f5d8e] = None, ) -> None: ''' :param scope: - :param id: - :param slack_channel_configuration_name: The name of Slack channel configuration. :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ. :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide. :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE :param log_retention: The number of days log events are kept in CloudWatch Logs. 
When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options. :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created. :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None :param role: The permission role of Slack channel configuration. Default: - A role will be created. ''' if __debug__: type_hints = typing.get_type_hints(SlackChannelConfiguration.__init__) check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"]) check_type(argname="argument id", value=id, expected_type=type_hints["id"]) props = SlackChannelConfigurationProps( slack_channel_configuration_name=slack_channel_configuration_name, slack_channel_id=slack_channel_id, slack_workspace_id=slack_workspace_id, logging_level=logging_level, log_retention=log_retention, log_retention_retry_options=log_retention_retry_options, log_retention_role=log_retention_role, notification_topics=notification_topics, role=role, ) jsii.create(self.__class__, self, [scope, id, props]) @jsii.member(jsii_name="fromSlackChannelConfigurationArn") # type: ignore[misc] @builtins.classmethod def from_slack_channel_configuration_arn( cls, scope: constructs.Construct, id: builtins.str, slack_channel_configuration_arn: builtins.str, ) -> ISlackChannelConfiguration: '''Import an existing Slack channel configuration provided an ARN. :param scope: The parent creating construct. :param id: The construct's name. :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack). :return: a reference to the existing Slack channel configuration ''' if __debug__: type_hints = typing.get_type_hints(SlackChannelConfiguration.from_slack_channel_configuration_arn) check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"]) check_type(argname="argument id", value=id, expected_type=type_hints["id"]) check_type(argname="argument slack_channel_configuration_arn", value=slack_channel_configuration_arn, expected_type=type_hints["slack_channel_configuration_arn"]) return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls, "fromSlackChannelConfigurationArn", [scope, id, slack_channel_configuration_arn])) @jsii.member(jsii_name="metricAll") # type: ignore[misc] @builtins.classmethod def metric_all( cls, metric_name: builtins.str, *, account: typing.Optional[builtins.str] = None, color: typing.Optional[builtins.str] = None, dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None, label: typing.Optional[builtins.str] = None, period: typing.Optional[_Duration_4839e8c3] = None, region: typing.Optional[builtins.str] = None, statistic: typing.Optional[builtins.str] = None, unit: typing.Optional[_Unit_61bc6f70] = None, ) -> _Metric_e396a4dc: '''Return the given named metric for All SlackChannelConfigurations. :param metric_name: - :param account: Account which this metric comes from. Default: - Deployment account. :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color :param dimensions_map: Dimensions of the metric. Default: - No dimensions. :param label: Label for this metric when added to a Graph in a Dashboard. 
You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5) :param region: Region which this metric comes from. Default: - Deployment region. :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream ''' if __debug__: type_hints = typing.get_type_hints(SlackChannelConfiguration.metric_all) check_type(argname="argument metric_name", value=metric_name, expected_type=type_hints["metric_name"]) props = _MetricOptions_1788b62f( account=account, color=color, dimensions_map=dimensions_map, label=label, period=period, region=region, statistic=statistic, unit=unit, ) return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, "metricAll", [metric_name, props])) @jsii.member(jsii_name="addNotificationTopic") def add_notification_topic(self, notification_topic: _ITopic_9eca4852) -> None: '''Adds a SNS topic that deliver notifications to AWS Chatbot. 
:param notification_topic: - ''' if __debug__: type_hints = typing.get_type_hints(SlackChannelConfiguration.add_notification_topic) check_type(argname="argument notification_topic", value=notification_topic, expected_type=type_hints["notification_topic"]) return typing.cast(None, jsii.invoke(self, "addNotificationTopic", [notification_topic])) @jsii.member(jsii_name="addToRolePolicy") def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None: '''Adds extra permission to iam-role of Slack channel configuration. :param statement: - ''' if __debug__: type_hints = typing.get_type_hints(SlackChannelConfiguration.add_to_role_policy) check_type(argname="argument statement", value=statement, expected_type=type_hints["statement"]) return typing.cast(None, jsii.invoke(self, "addToRolePolicy", [statement])) @jsii.member(jsii_name="bindAsNotificationRuleTarget") def bind_as_notification_rule_target( self, _scope: constructs.Construct, ) -> _NotificationRuleTargetConfig_ea27e095: '''Returns a target configuration for notification rule. :param _scope: - ''' if __debug__: type_hints = typing.get_type_hints(SlackChannelConfiguration.bind_as_notification_rule_target) check_type(argname="argument _scope", value=_scope, expected_type=type_hints["_scope"]) return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.invoke(self, "bindAsNotificationRuleTarget", [_scope])) @jsii.member(jsii_name="metric") def metric( self, metric_name: builtins.str, *, account: typing.Optional[builtins.str] = None, color: typing.Optional[builtins.str] = None, dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None, label: typing.Optional[builtins.str] = None, period: typing.Optional[_Duration_4839e8c3] = None, region: typing.Optional[builtins.str] = None, statistic: typing.Optional[builtins.str] = None, unit: typing.Optional[_Unit_61bc6f70] = None, ) -> _Metric_e396a4dc: '''Return the given named metric for this SlackChannelConfiguration. 
:param metric_name: - :param account: Account which this metric comes from. Default: - Deployment account. :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color :param dimensions_map: Dimensions of the metric. Default: - No dimensions. :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5) :param region: Region which this metric comes from. Default: - Deployment region. :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream ''' if __debug__: type_hints = typing.get_type_hints(SlackChannelConfiguration.metric) check_type(argname="argument metric_name", value=metric_name, expected_type=type_hints["metric_name"]) props = _MetricOptions_1788b62f( account=account, color=color, dimensions_map=dimensions_map, label=label, period=period, region=region, statistic=statistic, unit=unit, ) return typing.cast(_Metric_e396a4dc, jsii.invoke(self, "metric", [metric_name, props])) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="grantPrincipal") def grant_principal(self) -> _IPrincipal_539bb2fd: '''The principal to grant permissions to.''' return typing.cast(_IPrincipal_539bb2fd, jsii.get(self, "grantPrincipal")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackChannelConfigurationArn") def slack_channel_configuration_arn(self) -> builtins.str: '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.''' return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationArn")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="slackChannelConfigurationName") def slack_channel_configuration_name(self) -> builtins.str: '''The name of Slack channel configuration.''' return typing.cast(builtins.str, jsii.get(self, "slackChannelConfigurationName")) @builtins.property # type: ignore[misc] @jsii.member(jsii_name="role") def role(self) -> typing.Optional[_IRole_235f5d8e]: '''The permission role of Slack channel configuration.''' return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, "role")) @jsii.data_type( jsii_type="aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps", jsii_struct_bases=[], name_mapping={ "slack_channel_configuration_name": "slackChannelConfigurationName", "slack_channel_id": "slackChannelId", "slack_workspace_id": "slackWorkspaceId", "logging_level": "loggingLevel", 
"log_retention": "logRetention", "log_retention_retry_options": "logRetentionRetryOptions", "log_retention_role": "logRetentionRole", "notification_topics": "notificationTopics", "role": "role", }, ) class SlackChannelConfigurationProps: def __init__( self, *, slack_channel_configuration_name: builtins.str, slack_channel_id: builtins.str, slack_workspace_id: builtins.str, logging_level: typing.Optional[LoggingLevel] = None, log_retention: typing.Optional[_RetentionDays_070f99f0] = None, log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None, log_retention_role: typing.Optional[_IRole_235f5d8e] = None, notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None, role: typing.Optional[_IRole_235f5d8e] = None, ) -> None: '''Properties for a new Slack channel configuration. :param slack_channel_configuration_name: The name of Slack channel configuration. :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ. :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide. :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. 
Default: logs.RetentionDays.INFINITE :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options. :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created. :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None :param role: The permission role of Slack channel configuration. Default: - A role will be created. :exampleMetadata: infused Example:: import aws_cdk.aws_chatbot as chatbot # project: codebuild.Project target = chatbot.SlackChannelConfiguration(self, "MySlackChannel", slack_channel_configuration_name="YOUR_CHANNEL_NAME", slack_workspace_id="YOUR_SLACK_WORKSPACE_ID", slack_channel_id="YOUR_SLACK_CHANNEL_ID" ) rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target) ''' if isinstance(log_retention_retry_options, dict): log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**log_retention_retry_options) if __debug__: type_hints = typing.get_type_hints(SlackChannelConfigurationProps.__init__) check_type(argname="argument slack_channel_configuration_name", value=slack_channel_configuration_name, expected_type=type_hints["slack_channel_configuration_name"]) check_type(argname="argument slack_channel_id", value=slack_channel_id, expected_type=type_hints["slack_channel_id"]) check_type(argname="argument slack_workspace_id", value=slack_workspace_id, expected_type=type_hints["slack_workspace_id"]) check_type(argname="argument logging_level", value=logging_level, expected_type=type_hints["logging_level"]) check_type(argname="argument log_retention", value=log_retention, expected_type=type_hints["log_retention"]) check_type(argname="argument log_retention_retry_options", 
value=log_retention_retry_options, expected_type=type_hints["log_retention_retry_options"]) check_type(argname="argument log_retention_role", value=log_retention_role, expected_type=type_hints["log_retention_role"]) check_type(argname="argument notification_topics", value=notification_topics, expected_type=type_hints["notification_topics"]) check_type(argname="argument role", value=role, expected_type=type_hints["role"]) self._values: typing.Dict[str, typing.Any] = { "slack_channel_configuration_name": slack_channel_configuration_name, "slack_channel_id": slack_channel_id, "slack_workspace_id": slack_workspace_id, } if logging_level is not None: self._values["logging_level"] = logging_level if log_retention is not None: self._values["log_retention"] = log_retention if log_retention_retry_options is not None: self._values["log_retention_retry_options"] = log_retention_retry_options if log_retention_role is not None: self._values["log_retention_role"] = log_retention_role if notification_topics is not None: self._values["notification_topics"] = notification_topics if role is not None: self._values["role"] = role @builtins.property def slack_channel_configuration_name(self) -> builtins.str: '''The name of Slack channel configuration.''' result = self._values.get("slack_channel_configuration_name") assert result is not None, "Required property 'slack_channel_configuration_name' is missing" return typing.cast(builtins.str, result) @builtins.property def slack_channel_id(self) -> builtins.str: '''The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ. 
''' result = self._values.get("slack_channel_id") assert result is not None, "Required property 'slack_channel_id' is missing" return typing.cast(builtins.str, result) @builtins.property def slack_workspace_id(self) -> builtins.str: '''The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide. :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro ''' result = self._values.get("slack_workspace_id") assert result is not None, "Required property 'slack_workspace_id' is missing" return typing.cast(builtins.str, result) @builtins.property def logging_level(self) -> typing.Optional[LoggingLevel]: '''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. :default: LoggingLevel.NONE ''' result = self._values.get("logging_level") return typing.cast(typing.Optional[LoggingLevel], result) @builtins.property def log_retention(self) -> typing.Optional[_RetentionDays_070f99f0]: '''The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. :default: logs.RetentionDays.INFINITE ''' result = self._values.get("log_retention") return typing.cast(typing.Optional[_RetentionDays_070f99f0], result) @builtins.property def log_retention_retry_options( self, ) -> typing.Optional[_LogRetentionRetryOptions_62d80a14]: '''When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. :default: - Default AWS SDK retry options. 
''' result = self._values.get("log_retention_retry_options") return typing.cast(typing.Optional[_LogRetentionRetryOptions_62d80a14], result) @builtins.property def log_retention_role(self) -> typing.Optional[_IRole_235f5d8e]: '''The IAM role for the Lambda function associated with the custom resource that sets the retention policy. :default: - A new role is created. ''' result = self._values.get("log_retention_role") return typing.cast(typing.Optional[_IRole_235f5d8e], result) @builtins.property def notification_topics(self) -> typing.Optional[typing.List[_ITopic_9eca4852]]: '''The SNS topics that deliver notifications to AWS Chatbot. :default: None ''' result = self._values.get("notification_topics") return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]], result) @builtins.property def role(self) -> typing.Optional[_IRole_235f5d8e]: '''The permission role of Slack channel configuration. :default: - A role will be created. ''' result = self._values.get("role") return typing.cast(typing.Optional[_IRole_235f5d8e], result) def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "SlackChannelConfigurationProps(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) __all__ = [ "CfnSlackChannelConfiguration", "CfnSlackChannelConfigurationProps", "ISlackChannelConfiguration", "LoggingLevel", "SlackChannelConfiguration", "SlackChannelConfigurationProps", ] publication.publish()
normal
{ "blob_id": "937fd6aa7bd21258bd6e0f592d94a966519ef885", "index": 9458, "step-1": "<mask token>\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration')\nclass ISlackChannelConfiguration(_IResource_c80c4260, _IGrantable_71c4f5de,\n _INotificationRuleTarget_faa3b79b, typing_extensions.Protocol):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n ...\n <mask token>\n\n\nclass _ISlackChannelConfigurationProxy(jsii.proxy_for(_IResource_c80c4260),\n jsii.proxy_for(_IGrantable_71c4f5de), jsii.proxy_for(\n _INotificationRuleTarget_faa3b79b)):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n __jsii_type__: typing.ClassVar[str\n ] = 'aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration'\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n 
@jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. 
Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n metric)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n\n<mask token>\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.LoggingLevel')\nclass LoggingLevel(enum.Enum):\n \"\"\"Logging levels include ERROR, INFO, or NONE.\"\"\"\n ERROR = 'ERROR'\n \"\"\"ERROR.\"\"\"\n INFO = 'INFO'\n \"\"\"INFO.\"\"\"\n NONE = 'NONE'\n \"\"\"NONE.\"\"\"\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(_Resource_45bc6135, metaclass=jsii.JSIIMeta,\n jsii_type='aws-cdk-lib.aws_chatbot.SlackChannelConfiguration'):\n \"\"\"A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # 
project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n slack_channel_configuration_name: builtins.str, slack_channel_id:\n builtins.str, slack_workspace_id: builtins.str, logging_level:\n typing.Optional[LoggingLevel]=None, log_retention: typing.Optional[\n _RetentionDays_070f99f0]=None, log_retention_retry_options: typing.\n Optional[_LogRetentionRetryOptions_62d80a14]=None,\n log_retention_role: typing.Optional[_IRole_235f5d8e]=None,\n notification_topics: typing.Optional[typing.Sequence[\n _ITopic_9eca4852]]=None, role: typing.Optional[_IRole_235f5d8e]=None\n ) ->None:\n \"\"\"\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. 
When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n __init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = SlackChannelConfigurationProps(slack_channel_configuration_name\n =slack_channel_configuration_name, slack_channel_id=\n slack_channel_id, slack_workspace_id=slack_workspace_id,\n logging_level=logging_level, log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role, notification_topics=\n notification_topics, role=role)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='fromSlackChannelConfigurationArn')\n @builtins.classmethod\n def from_slack_channel_configuration_arn(cls, scope: constructs.\n Construct, id: builtins.str, slack_channel_configuration_arn:\n builtins.str) ->ISlackChannelConfiguration:\n \"\"\"Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param 
slack_channel_configuration_arn: configuration ARN (i.e. arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n from_slack_channel_configuration_arn)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n check_type(argname='argument slack_channel_configuration_arn',\n value=slack_channel_configuration_arn, expected_type=\n type_hints['slack_channel_configuration_arn'])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls,\n 'fromSlackChannelConfigurationArn', [scope, id,\n slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name='metricAll')\n @builtins.classmethod\n def metric_all(cls, metric_name: builtins.str, *, account: typing.\n Optional[builtins.str]=None, color: typing.Optional[builtins.str]=\n None, dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. 
You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n metric_all)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, 'metricAll',\n [metric_name, props]))\n\n @jsii.member(jsii_name='addNotificationTopic')\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852\n ) ->None:\n \"\"\"Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_notification_topic)\n check_type(argname='argument notification_topic', value=\n notification_topic, expected_type=type_hints[\n 'notification_topic'])\n return typing.cast(None, jsii.invoke(self, 'addNotificationTopic',\n [notification_topic]))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='bindAsNotificationRuleTarget')\n def bind_as_notification_rule_target(self, _scope: constructs.Construct\n ) ->_NotificationRuleTargetConfig_ea27e095:\n \"\"\"Returns a target configuration for notification rule.\n\n :param _scope: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n 
bind_as_notification_rule_target)\n check_type(argname='argument _scope', value=_scope,\n expected_type=type_hints['_scope'])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.\n invoke(self, 'bindAsNotificationRuleTarget', [_scope]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. 
Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric\n )\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n @builtins.property\n @jsii.member(jsii_name='grantPrincipal')\n def grant_principal(self) ->_IPrincipal_539bb2fd:\n \"\"\"The principal to grant permissions to.\"\"\"\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self,\n 'grantPrincipal'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 
'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'slack_channel_configuration_name':\n 'slackChannelConfigurationName', 'slack_channel_id': 'slackChannelId',\n 'slack_workspace_id': 'slackWorkspaceId', 'logging_level':\n 'loggingLevel', 'log_retention': 'logRetention',\n 'log_retention_retry_options': 'logRetentionRetryOptions',\n 'log_retention_role': 'logRetentionRole', 'notification_topics':\n 'notificationTopics', 'role': 'role'})\nclass SlackChannelConfigurationProps:\n\n def __init__(self, *, slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel]=None, log_retention:\n typing.Optional[_RetentionDays_070f99f0]=None,\n log_retention_retry_options: typing.Optional[\n _LogRetentionRetryOptions_62d80a14]=None, log_retention_role:\n typing.Optional[_IRole_235f5d8e]=None, notification_topics: typing.\n Optional[typing.Sequence[_ITopic_9eca4852]]=None, role: typing.\n Optional[_IRole_235f5d8e]=None) ->None:\n \"\"\"Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. 
Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**\n log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps\n .__init__)\n check_type(argname='argument slack_channel_configuration_name',\n value=slack_channel_configuration_name, expected_type=\n type_hints['slack_channel_configuration_name'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument log_retention', value=\n log_retention, expected_type=type_hints['log_retention'])\n check_type(argname='argument log_retention_retry_options',\n value=log_retention_retry_options, expected_type=type_hints\n ['log_retention_retry_options'])\n check_type(argname='argument log_retention_role', value=\n log_retention_role, expected_type=type_hints[\n 'log_retention_role'])\n check_type(argname='argument notification_topics', value=\n notification_topics, expected_type=type_hints[\n 'notification_topics'])\n check_type(argname='argument role', value=role, expected_type=\n type_hints['role'])\n self._values: typing.Dict[str, typing.Any] = {\n 'slack_channel_configuration_name':\n 
slack_channel_configuration_name, 'slack_channel_id':\n slack_channel_id, 'slack_workspace_id': slack_workspace_id}\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if log_retention is not None:\n self._values['log_retention'] = log_retention\n if log_retention_retry_options is not None:\n self._values['log_retention_retry_options'\n ] = log_retention_retry_options\n if log_retention_role is not None:\n self._values['log_retention_role'] = log_retention_role\n if notification_topics is not None:\n self._values['notification_topics'] = notification_topics\n if role is not None:\n self._values['role'] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n result = self._values.get('slack_channel_configuration_name')\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. 
To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 
'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(_CfnResource_9df397a6, metaclass=jsii.\n JSIIMeta, jsii_type='aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration'\n ):\n <mask token>\n <mask token>\n <mask token>\n\n @jsii.member(jsii_name='renderProperties')\n def _render_properties(self, props: typing.Mapping[builtins.str, typing\n .Any]) ->typing.Mapping[builtins.str, typing.Any]:\n \"\"\"\n :param props: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n ._render_properties)\n check_type(argname='argument props', value=props, expected_type\n =type_hints['props'])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n invoke(self, 'renderProperties', [props]))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'configuration_name').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'configurationName', value)\n <mask token>\n <mask token>\n <mask token>\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'slack_channel_id').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'slackChannelId', value)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps',\n jsii_struct_bases=[], 
name_mapping={'configuration_name':\n 'configurationName', 'iam_role_arn': 'iamRoleArn', 'slack_channel_id':\n 'slackChannelId', 'slack_workspace_id': 'slackWorkspaceId',\n 'guardrail_policies': 'guardrailPolicies', 'logging_level':\n 'loggingLevel', 'sns_topic_arns': 'snsTopicArns', 'user_role_required':\n 'userRoleRequired'})\nclass CfnSlackChannelConfigurationProps:\n\n def __init__(self, *, configuration_name: builtins.str, iam_role_arn:\n builtins.str, slack_channel_id: builtins.str, slack_workspace_id:\n builtins.str, guardrail_policies: typing.Optional[typing.Sequence[\n builtins.str]]=None, logging_level: typing.Optional[builtins.str]=\n None, sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]\n ]=None, user_role_required: typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]=None) ->None:\n \"\"\"Properties for defining a ``CfnSlackChannelConfiguration``.\n\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(\n CfnSlackChannelConfigurationProps.__init__)\n check_type(argname='argument configuration_name', value=\n configuration_name, expected_type=type_hints[\n 'configuration_name'])\n check_type(argname='argument iam_role_arn', value=iam_role_arn,\n expected_type=type_hints['iam_role_arn'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, 
expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument guardrail_policies', value=\n guardrail_policies, expected_type=type_hints[\n 'guardrail_policies'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument sns_topic_arns', value=\n sns_topic_arns, expected_type=type_hints['sns_topic_arns'])\n check_type(argname='argument user_role_required', value=\n user_role_required, expected_type=type_hints[\n 'user_role_required'])\n self._values: typing.Dict[str, typing.Any] = {'configuration_name':\n configuration_name, 'iam_role_arn': iam_role_arn,\n 'slack_channel_id': slack_channel_id, 'slack_workspace_id':\n slack_workspace_id}\n if guardrail_policies is not None:\n self._values['guardrail_policies'] = guardrail_policies\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if sns_topic_arns is not None:\n self._values['sns_topic_arns'] = sns_topic_arns\n if user_role_required is not None:\n self._values['user_role_required'] = user_role_required\n\n @builtins.property\n def configuration_name(self) ->builtins.str:\n \"\"\"The name of the configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n \"\"\"\n result = self._values.get('configuration_name')\n assert result is not None, \"Required property 'configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def iam_role_arn(self) ->builtins.str:\n \"\"\"The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n \"\"\"\n result = self._values.get('iam_role_arn')\n assert result is not None, \"Required property 'iam_role_arn' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def guardrail_policies(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n \"\"\"\n result = self._values.get('guardrail_policies')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[builtins.str], result)\n\n @builtins.property\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n result = self._values.get('sns_topic_arns')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def user_role_required(self) ->typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]:\n \"\"\"Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n \"\"\"\n result = self._values.get('user_role_required')\n return typing.cast(typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'CfnSlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration')\nclass ISlackChannelConfiguration(_IResource_c80c4260, _IGrantable_71c4f5de,\n _INotificationRuleTarget_faa3b79b, 
typing_extensions.Protocol):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n ...\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n ...\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. 
Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n ...\n\n\nclass _ISlackChannelConfigurationProxy(jsii.proxy_for(_IResource_c80c4260),\n jsii.proxy_for(_IGrantable_71c4f5de), jsii.proxy_for(\n _INotificationRuleTarget_faa3b79b)):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n __jsii_type__: typing.ClassVar[str\n ] = 'aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration'\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: 
builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. 
Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n metric)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n\n<mask token>\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.LoggingLevel')\nclass LoggingLevel(enum.Enum):\n \"\"\"Logging levels include ERROR, INFO, or NONE.\"\"\"\n ERROR = 'ERROR'\n \"\"\"ERROR.\"\"\"\n INFO = 'INFO'\n \"\"\"INFO.\"\"\"\n NONE = 'NONE'\n \"\"\"NONE.\"\"\"\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(_Resource_45bc6135, metaclass=jsii.JSIIMeta,\n jsii_type='aws-cdk-lib.aws_chatbot.SlackChannelConfiguration'):\n \"\"\"A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n slack_channel_configuration_name: builtins.str, slack_channel_id:\n builtins.str, slack_workspace_id: builtins.str, logging_level:\n 
typing.Optional[LoggingLevel]=None, log_retention: typing.Optional[\n _RetentionDays_070f99f0]=None, log_retention_retry_options: typing.\n Optional[_LogRetentionRetryOptions_62d80a14]=None,\n log_retention_role: typing.Optional[_IRole_235f5d8e]=None,\n notification_topics: typing.Optional[typing.Sequence[\n _ITopic_9eca4852]]=None, role: typing.Optional[_IRole_235f5d8e]=None\n ) ->None:\n \"\"\"\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. 
Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n __init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = SlackChannelConfigurationProps(slack_channel_configuration_name\n =slack_channel_configuration_name, slack_channel_id=\n slack_channel_id, slack_workspace_id=slack_workspace_id,\n logging_level=logging_level, log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role, notification_topics=\n notification_topics, role=role)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='fromSlackChannelConfigurationArn')\n @builtins.classmethod\n def from_slack_channel_configuration_arn(cls, scope: constructs.\n Construct, id: builtins.str, slack_channel_configuration_arn:\n builtins.str) ->ISlackChannelConfiguration:\n \"\"\"Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n from_slack_channel_configuration_arn)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n check_type(argname='argument slack_channel_configuration_arn',\n value=slack_channel_configuration_arn, expected_type=\n type_hints['slack_channel_configuration_arn'])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls,\n 'fromSlackChannelConfigurationArn', [scope, id,\n slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name='metricAll')\n @builtins.classmethod\n def metric_all(cls, metric_name: builtins.str, *, account: typing.\n Optional[builtins.str]=None, color: typing.Optional[builtins.str]=\n None, dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. 
You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n metric_all)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, 'metricAll',\n [metric_name, props]))\n\n @jsii.member(jsii_name='addNotificationTopic')\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852\n ) ->None:\n \"\"\"Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_notification_topic)\n check_type(argname='argument notification_topic', value=\n notification_topic, expected_type=type_hints[\n 'notification_topic'])\n return typing.cast(None, jsii.invoke(self, 'addNotificationTopic',\n [notification_topic]))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='bindAsNotificationRuleTarget')\n def bind_as_notification_rule_target(self, _scope: constructs.Construct\n ) ->_NotificationRuleTargetConfig_ea27e095:\n \"\"\"Returns a target configuration for notification rule.\n\n :param _scope: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n 
bind_as_notification_rule_target)\n check_type(argname='argument _scope', value=_scope,\n expected_type=type_hints['_scope'])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.\n invoke(self, 'bindAsNotificationRuleTarget', [_scope]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. 
Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric\n )\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n @builtins.property\n @jsii.member(jsii_name='grantPrincipal')\n def grant_principal(self) ->_IPrincipal_539bb2fd:\n \"\"\"The principal to grant permissions to.\"\"\"\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self,\n 'grantPrincipal'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 
'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'slack_channel_configuration_name':\n 'slackChannelConfigurationName', 'slack_channel_id': 'slackChannelId',\n 'slack_workspace_id': 'slackWorkspaceId', 'logging_level':\n 'loggingLevel', 'log_retention': 'logRetention',\n 'log_retention_retry_options': 'logRetentionRetryOptions',\n 'log_retention_role': 'logRetentionRole', 'notification_topics':\n 'notificationTopics', 'role': 'role'})\nclass SlackChannelConfigurationProps:\n\n def __init__(self, *, slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel]=None, log_retention:\n typing.Optional[_RetentionDays_070f99f0]=None,\n log_retention_retry_options: typing.Optional[\n _LogRetentionRetryOptions_62d80a14]=None, log_retention_role:\n typing.Optional[_IRole_235f5d8e]=None, notification_topics: typing.\n Optional[typing.Sequence[_ITopic_9eca4852]]=None, role: typing.\n Optional[_IRole_235f5d8e]=None) ->None:\n \"\"\"Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. 
Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**\n log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps\n .__init__)\n check_type(argname='argument slack_channel_configuration_name',\n value=slack_channel_configuration_name, expected_type=\n type_hints['slack_channel_configuration_name'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument log_retention', value=\n log_retention, expected_type=type_hints['log_retention'])\n check_type(argname='argument log_retention_retry_options',\n value=log_retention_retry_options, expected_type=type_hints\n ['log_retention_retry_options'])\n check_type(argname='argument log_retention_role', value=\n log_retention_role, expected_type=type_hints[\n 'log_retention_role'])\n check_type(argname='argument notification_topics', value=\n notification_topics, expected_type=type_hints[\n 'notification_topics'])\n check_type(argname='argument role', value=role, expected_type=\n type_hints['role'])\n self._values: typing.Dict[str, typing.Any] = {\n 'slack_channel_configuration_name':\n 
slack_channel_configuration_name, 'slack_channel_id':\n slack_channel_id, 'slack_workspace_id': slack_workspace_id}\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if log_retention is not None:\n self._values['log_retention'] = log_retention\n if log_retention_retry_options is not None:\n self._values['log_retention_retry_options'\n ] = log_retention_retry_options\n if log_retention_role is not None:\n self._values['log_retention_role'] = log_retention_role\n if notification_topics is not None:\n self._values['notification_topics'] = notification_topics\n if role is not None:\n self._values['role'] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n result = self._values.get('slack_channel_configuration_name')\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. 
To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 
'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(_CfnResource_9df397a6, metaclass=jsii.\n JSIIMeta, jsii_type='aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration'\n ):\n <mask token>\n <mask token>\n <mask token>\n\n @jsii.member(jsii_name='renderProperties')\n def _render_properties(self, props: typing.Mapping[builtins.str, typing\n .Any]) ->typing.Mapping[builtins.str, typing.Any]:\n \"\"\"\n :param props: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n ._render_properties)\n check_type(argname='argument props', value=props, expected_type\n =type_hints['props'])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n invoke(self, 'renderProperties', [props]))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'configuration_name').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'configurationName', value)\n <mask token>\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelId')\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. 
For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self, 'slackChannelId'))\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'slack_channel_id').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'slackChannelId', value)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='loggingLevel')\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n return typing.cast(typing.Optional[builtins.str], jsii.get(self,\n 'loggingLevel'))\n\n @logging_level.setter\n def logging_level(self, value: typing.Optional[builtins.str]) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'logging_level').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'loggingLevel', value)\n\n @builtins.property\n @jsii.member(jsii_name='snsTopicArns')\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii\n .get(self, 'snsTopicArns'))\n\n @sns_topic_arns.setter\n def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]\n ) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'sns_topic_arns').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'snsTopicArns', value)\n <mask token>\n <mask token>\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'configuration_name':\n 'configurationName', 'iam_role_arn': 'iamRoleArn', 'slack_channel_id':\n 'slackChannelId', 'slack_workspace_id': 'slackWorkspaceId',\n 'guardrail_policies': 'guardrailPolicies', 'logging_level':\n 'loggingLevel', 'sns_topic_arns': 'snsTopicArns', 'user_role_required':\n 'userRoleRequired'})\nclass CfnSlackChannelConfigurationProps:\n\n def __init__(self, *, configuration_name: builtins.str, iam_role_arn:\n builtins.str, slack_channel_id: builtins.str, slack_workspace_id:\n builtins.str, guardrail_policies: typing.Optional[typing.Sequence[\n builtins.str]]=None, logging_level: typing.Optional[builtins.str]=\n None, sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]\n ]=None, user_role_required: typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]=None) ->None:\n \"\"\"Properties for defining a ``CfnSlackChannelConfiguration``.\n\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. 
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(\n CfnSlackChannelConfigurationProps.__init__)\n check_type(argname='argument configuration_name', value=\n configuration_name, expected_type=type_hints[\n 'configuration_name'])\n check_type(argname='argument iam_role_arn', value=iam_role_arn,\n expected_type=type_hints['iam_role_arn'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument guardrail_policies', value=\n guardrail_policies, expected_type=type_hints[\n 'guardrail_policies'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument sns_topic_arns', value=\n sns_topic_arns, 
expected_type=type_hints['sns_topic_arns'])\n check_type(argname='argument user_role_required', value=\n user_role_required, expected_type=type_hints[\n 'user_role_required'])\n self._values: typing.Dict[str, typing.Any] = {'configuration_name':\n configuration_name, 'iam_role_arn': iam_role_arn,\n 'slack_channel_id': slack_channel_id, 'slack_workspace_id':\n slack_workspace_id}\n if guardrail_policies is not None:\n self._values['guardrail_policies'] = guardrail_policies\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if sns_topic_arns is not None:\n self._values['sns_topic_arns'] = sns_topic_arns\n if user_role_required is not None:\n self._values['user_role_required'] = user_role_required\n\n @builtins.property\n def configuration_name(self) ->builtins.str:\n \"\"\"The name of the configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n \"\"\"\n result = self._values.get('configuration_name')\n assert result is not None, \"Required property 'configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def iam_role_arn(self) ->builtins.str:\n \"\"\"The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n \"\"\"\n result = self._values.get('iam_role_arn')\n assert result is not None, \"Required property 'iam_role_arn' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def guardrail_policies(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n \"\"\"\n result = self._values.get('guardrail_policies')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[builtins.str], result)\n\n @builtins.property\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n result = self._values.get('sns_topic_arns')\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def user_role_required(self) ->typing.Optional[typing.Union[builtins.\n bool, _IResolvable_da3f097b]]:\n \"\"\"Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n \"\"\"\n result = self._values.get('user_role_required')\n return typing.cast(typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 'CfnSlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration')\nclass ISlackChannelConfiguration(_IResource_c80c4260, _IGrantable_71c4f5de,\n _INotificationRuleTarget_faa3b79b, 
typing_extensions.Protocol):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n ...\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n ...\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n ...\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. 
Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n ...\n\n\nclass _ISlackChannelConfigurationProxy(jsii.proxy_for(_IResource_c80c4260),\n jsii.proxy_for(_IGrantable_71c4f5de), jsii.proxy_for(\n _INotificationRuleTarget_faa3b79b)):\n \"\"\"Represents a Slack channel configuration.\"\"\"\n __jsii_type__: typing.ClassVar[str\n ] = 'aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration'\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\n\n :attribute: true\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n \"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds a statement to the IAM role.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: 
builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. 
Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.\n metric)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n\n<mask token>\n\n\[email protected](jsii_type='aws-cdk-lib.aws_chatbot.LoggingLevel')\nclass LoggingLevel(enum.Enum):\n \"\"\"Logging levels include ERROR, INFO, or NONE.\"\"\"\n ERROR = 'ERROR'\n \"\"\"ERROR.\"\"\"\n INFO = 'INFO'\n \"\"\"INFO.\"\"\"\n NONE = 'NONE'\n \"\"\"NONE.\"\"\"\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(_Resource_45bc6135, metaclass=jsii.JSIIMeta,\n jsii_type='aws-cdk-lib.aws_chatbot.SlackChannelConfiguration'):\n \"\"\"A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n slack_channel_configuration_name: builtins.str, slack_channel_id:\n builtins.str, slack_workspace_id: builtins.str, logging_level:\n 
typing.Optional[LoggingLevel]=None, log_retention: typing.Optional[\n _RetentionDays_070f99f0]=None, log_retention_retry_options: typing.\n Optional[_LogRetentionRetryOptions_62d80a14]=None,\n log_retention_role: typing.Optional[_IRole_235f5d8e]=None,\n notification_topics: typing.Optional[typing.Sequence[\n _ITopic_9eca4852]]=None, role: typing.Optional[_IRole_235f5d8e]=None\n ) ->None:\n \"\"\"\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. 
Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n __init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = SlackChannelConfigurationProps(slack_channel_configuration_name\n =slack_channel_configuration_name, slack_channel_id=\n slack_channel_id, slack_workspace_id=slack_workspace_id,\n logging_level=logging_level, log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role, notification_topics=\n notification_topics, role=role)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='fromSlackChannelConfigurationArn')\n @builtins.classmethod\n def from_slack_channel_configuration_arn(cls, scope: constructs.\n Construct, id: builtins.str, slack_channel_configuration_arn:\n builtins.str) ->ISlackChannelConfiguration:\n \"\"\"Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n from_slack_channel_configuration_arn)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n check_type(argname='argument slack_channel_configuration_arn',\n value=slack_channel_configuration_arn, expected_type=\n type_hints['slack_channel_configuration_arn'])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls,\n 'fromSlackChannelConfigurationArn', [scope, id,\n slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name='metricAll')\n @builtins.classmethod\n def metric_all(cls, metric_name: builtins.str, *, account: typing.\n Optional[builtins.str]=None, color: typing.Optional[builtins.str]=\n None, dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. 
You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n metric_all)\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, 'metricAll',\n [metric_name, props]))\n\n @jsii.member(jsii_name='addNotificationTopic')\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852\n ) ->None:\n \"\"\"Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_notification_topic)\n check_type(argname='argument notification_topic', value=\n notification_topic, expected_type=type_hints[\n 'notification_topic'])\n return typing.cast(None, jsii.invoke(self, 'addNotificationTopic',\n [notification_topic]))\n\n @jsii.member(jsii_name='addToRolePolicy')\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) ->None:\n \"\"\"Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n add_to_role_policy)\n check_type(argname='argument statement', value=statement,\n expected_type=type_hints['statement'])\n return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [\n statement]))\n\n @jsii.member(jsii_name='bindAsNotificationRuleTarget')\n def bind_as_notification_rule_target(self, _scope: constructs.Construct\n ) ->_NotificationRuleTargetConfig_ea27e095:\n \"\"\"Returns a target configuration for notification rule.\n\n :param _scope: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.\n 
bind_as_notification_rule_target)\n check_type(argname='argument _scope', value=_scope,\n expected_type=type_hints['_scope'])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.\n invoke(self, 'bindAsNotificationRuleTarget', [_scope]))\n\n @jsii.member(jsii_name='metric')\n def metric(self, metric_name: builtins.str, *, account: typing.Optional\n [builtins.str]=None, color: typing.Optional[builtins.str]=None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str,\n builtins.str]]=None, label: typing.Optional[builtins.str]=None,\n period: typing.Optional[_Duration_4839e8c3]=None, region: typing.\n Optional[builtins.str]=None, statistic: typing.Optional[builtins.\n str]=None, unit: typing.Optional[_Unit_61bc6f70]=None\n ) ->_Metric_e396a4dc:\n \"\"\"Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. 
Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric\n )\n check_type(argname='argument metric_name', value=metric_name,\n expected_type=type_hints['metric_name'])\n props = _MetricOptions_1788b62f(account=account, color=color,\n dimensions_map=dimensions_map, label=label, period=period,\n region=region, statistic=statistic, unit=unit)\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [\n metric_name, props]))\n\n @builtins.property\n @jsii.member(jsii_name='grantPrincipal')\n def grant_principal(self) ->_IPrincipal_539bb2fd:\n \"\"\"The principal to grant permissions to.\"\"\"\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self,\n 'grantPrincipal'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationArn')\n def slack_channel_configuration_arn(self) ->builtins.str:\n \"\"\"The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 'slackChannelConfigurationArn'))\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelConfigurationName')\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n return typing.cast(builtins.str, jsii.get(self,\n 
'slackChannelConfigurationName'))\n\n @builtins.property\n @jsii.member(jsii_name='role')\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\"\"\"\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,\n 'role'))\n\n\[email protected]_type(jsii_type=\n 'aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps',\n jsii_struct_bases=[], name_mapping={'slack_channel_configuration_name':\n 'slackChannelConfigurationName', 'slack_channel_id': 'slackChannelId',\n 'slack_workspace_id': 'slackWorkspaceId', 'logging_level':\n 'loggingLevel', 'log_retention': 'logRetention',\n 'log_retention_retry_options': 'logRetentionRetryOptions',\n 'log_retention_role': 'logRetentionRole', 'notification_topics':\n 'notificationTopics', 'role': 'role'})\nclass SlackChannelConfigurationProps:\n\n def __init__(self, *, slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel]=None, log_retention:\n typing.Optional[_RetentionDays_070f99f0]=None,\n log_retention_retry_options: typing.Optional[\n _LogRetentionRetryOptions_62d80a14]=None, log_retention_role:\n typing.Optional[_IRole_235f5d8e]=None, notification_topics: typing.\n Optional[typing.Sequence[_ITopic_9eca4852]]=None, role: typing.\n Optional[_IRole_235f5d8e]=None) ->None:\n \"\"\"Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. 
Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n \"\"\"\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**\n log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps\n .__init__)\n check_type(argname='argument slack_channel_configuration_name',\n value=slack_channel_configuration_name, expected_type=\n type_hints['slack_channel_configuration_name'])\n check_type(argname='argument slack_channel_id', value=\n slack_channel_id, expected_type=type_hints['slack_channel_id'])\n check_type(argname='argument slack_workspace_id', value=\n slack_workspace_id, expected_type=type_hints[\n 'slack_workspace_id'])\n check_type(argname='argument logging_level', value=\n logging_level, expected_type=type_hints['logging_level'])\n check_type(argname='argument log_retention', value=\n log_retention, expected_type=type_hints['log_retention'])\n check_type(argname='argument log_retention_retry_options',\n value=log_retention_retry_options, expected_type=type_hints\n ['log_retention_retry_options'])\n check_type(argname='argument log_retention_role', value=\n log_retention_role, expected_type=type_hints[\n 'log_retention_role'])\n check_type(argname='argument notification_topics', value=\n notification_topics, expected_type=type_hints[\n 'notification_topics'])\n check_type(argname='argument role', value=role, expected_type=\n type_hints['role'])\n self._values: typing.Dict[str, typing.Any] = {\n 'slack_channel_configuration_name':\n 
slack_channel_configuration_name, 'slack_channel_id':\n slack_channel_id, 'slack_workspace_id': slack_workspace_id}\n if logging_level is not None:\n self._values['logging_level'] = logging_level\n if log_retention is not None:\n self._values['log_retention'] = log_retention\n if log_retention_retry_options is not None:\n self._values['log_retention_retry_options'\n ] = log_retention_retry_options\n if log_retention_role is not None:\n self._values['log_retention_role'] = log_retention_role\n if notification_topics is not None:\n self._values['notification_topics'] = notification_topics\n if role is not None:\n self._values['role'] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) ->builtins.str:\n \"\"\"The name of Slack channel configuration.\"\"\"\n result = self._values.get('slack_channel_configuration_name')\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. 
To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 
'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(_CfnResource_9df397a6, metaclass=jsii.\n JSIIMeta, jsii_type='aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration'\n ):\n <mask token>\n\n def __init__(self, scope: constructs.Construct, id: builtins.str, *,\n configuration_name: builtins.str, iam_role_arn: builtins.str,\n slack_channel_id: builtins.str, slack_workspace_id: builtins.str,\n guardrail_policies: typing.Optional[typing.Sequence[builtins.str]]=\n None, logging_level: typing.Optional[builtins.str]=None,\n sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]]=None,\n user_role_required: typing.Optional[typing.Union[builtins.bool,\n _IResolvable_da3f097b]]=None) ->None:\n \"\"\"Create a new ``AWS::Chatbot::SlackChannelConfiguration``.\n\n :param scope: - scope in which this resource is defined.\n :param id: - scoped id of the resource.\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n .__init__)\n check_type(argname='argument scope', value=scope, expected_type\n =type_hints['scope'])\n check_type(argname='argument id', value=id, expected_type=\n type_hints['id'])\n props = CfnSlackChannelConfigurationProps(configuration_name=\n configuration_name, iam_role_arn=iam_role_arn, slack_channel_id\n =slack_channel_id, slack_workspace_id=slack_workspace_id,\n guardrail_policies=guardrail_policies, logging_level=\n logging_level, sns_topic_arns=sns_topic_arns,\n user_role_required=user_role_required)\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name='inspect')\n def inspect(self, inspector: _TreeInspector_488e0dd5) ->None:\n \"\"\"Examines the CloudFormation resource and discloses attributes.\n\n :param inspector: - tree inspector to collect and process attributes.\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n .inspect)\n check_type(argname='argument inspector', value=inspector,\n expected_type=type_hints['inspector'])\n return typing.cast(None, jsii.invoke(self, 'inspect', [inspector]))\n\n 
@jsii.member(jsii_name='renderProperties')\n def _render_properties(self, props: typing.Mapping[builtins.str, typing\n .Any]) ->typing.Mapping[builtins.str, typing.Any]:\n \"\"\"\n :param props: -\n \"\"\"\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration\n ._render_properties)\n check_type(argname='argument props', value=props, expected_type\n =type_hints['props'])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n invoke(self, 'renderProperties', [props]))\n\n @jsii.python.classproperty\n @jsii.member(jsii_name='CFN_RESOURCE_TYPE_NAME')\n def CFN_RESOURCE_TYPE_NAME(cls) ->builtins.str:\n \"\"\"The CloudFormation resource type name for this resource class.\"\"\"\n return typing.cast(builtins.str, jsii.sget(cls,\n 'CFN_RESOURCE_TYPE_NAME'))\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='cfnProperties')\n def _cfn_properties(self) ->typing.Mapping[builtins.str, typing.Any]:\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.\n get(self, 'cfnProperties'))\n <mask token>\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'configuration_name').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'configurationName', value)\n <mask token>\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='slackChannelId')\n def slack_channel_id(self) ->builtins.str:\n \"\"\"The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. 
For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self, 'slackChannelId'))\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'slack_channel_id').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'slackChannelId', value)\n\n @builtins.property\n @jsii.member(jsii_name='slackWorkspaceId')\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n \"\"\"\n return typing.cast(builtins.str, jsii.get(self, 'slackWorkspaceId'))\n <mask token>\n\n @builtins.property\n @jsii.member(jsii_name='guardrailPolicies')\n def guardrail_policies(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n \"\"\"\n return 
typing.cast(typing.Optional[typing.List[builtins.str]], jsii\n .get(self, 'guardrailPolicies'))\n\n @guardrail_policies.setter\n def guardrail_policies(self, value: typing.Optional[typing.List[\n builtins.str]]) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'guardrail_policies').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'guardrailPolicies', value)\n\n @builtins.property\n @jsii.member(jsii_name='loggingLevel')\n def logging_level(self) ->typing.Optional[builtins.str]:\n \"\"\"Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n \"\"\"\n return typing.cast(typing.Optional[builtins.str], jsii.get(self,\n 'loggingLevel'))\n\n @logging_level.setter\n def logging_level(self, value: typing.Optional[builtins.str]) ->None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(\n CfnSlackChannelConfiguration, 'logging_level').fset)\n check_type(argname='argument value', value=value, expected_type\n =type_hints['value'])\n jsii.set(self, 'loggingLevel', value)\n\n @builtins.property\n @jsii.member(jsii_name='snsTopicArns')\n def sns_topic_arns(self) ->typing.Optional[typing.List[builtins.str]]:\n \"\"\"The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n \"\"\"\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii\n .get(self, 'snsTopicArns'))\n\n @sns_topic_arns.setter\n def sns_topic_arns(self, value: 
        typing.Optional[typing.List[builtins.str]]) -> None:
        # Runtime type-check the incoming value before handing it to the
        # jsii kernel (only active under __debug__, stripped by -O).
        if __debug__:
            type_hints = typing.get_type_hints(getattr(
                CfnSlackChannelConfiguration, 'sns_topic_arns').fset)
            check_type(argname='argument value', value=value,
                expected_type=type_hints['value'])
        jsii.set(self, 'snsTopicArns', value)

    @builtins.property
    @jsii.member(jsii_name='userRoleRequired')
    def user_role_required(self) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
        """Enables use of a user role requirement in your chat configuration.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired
        """
        return typing.cast(typing.Optional[typing.Union[builtins.bool,
            _IResolvable_da3f097b]], jsii.get(self, 'userRoleRequired'))

    @user_role_required.setter
    def user_role_required(self, value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]) -> None:
        if __debug__:
            type_hints = typing.get_type_hints(getattr(
                CfnSlackChannelConfiguration, 'user_role_required').fset)
            check_type(argname='argument value', value=value,
                expected_type=type_hints['value'])
        jsii.set(self, 'userRoleRequired', value)


# NOTE(review): auto-generated jsii binding for the
# AWS::Chatbot::SlackChannelConfiguration CloudFormation resource properties.
# Prefer regenerating from the jsii assembly over hand-editing the code below.
@jsii.data_type(jsii_type=
    'aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps',
    jsii_struct_bases=[], name_mapping={'configuration_name':
    'configurationName', 'iam_role_arn': 'iamRoleArn', 'slack_channel_id':
    'slackChannelId', 'slack_workspace_id': 'slackWorkspaceId',
    'guardrail_policies': 'guardrailPolicies', 'logging_level':
    'loggingLevel', 'sns_topic_arns': 'snsTopicArns', 'user_role_required':
    'userRoleRequired'})
class CfnSlackChannelConfigurationProps:

    def __init__(self, *, configuration_name: builtins.str,
        iam_role_arn: builtins.str, slack_channel_id: builtins.str,
        slack_workspace_id: builtins.str,
        guardrail_policies: typing.Optional[typing.Sequence[builtins.str]]=None,
        logging_level: typing.Optional[builtins.str]=None,
        sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]]=None,
        user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]=None) -> None:
        """Properties for defining a ``CfnSlackChannelConfiguration``.

        :param configuration_name: The name of the configuration.
        :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .
        :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .
        :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .
        :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.
        :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .
        :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .
        :param user_role_required: Enables use of a user role requirement in your chat configuration.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html
        :exampleMetadata: fixture=_generated

        Example::

            # The code below shows an example of how to instantiate this type.
            # The values are placeholders you should change.
            from aws_cdk import aws_chatbot as chatbot

            cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(
                configuration_name="configurationName",
                iam_role_arn="iamRoleArn",
                slack_channel_id="slackChannelId",
                slack_workspace_id="slackWorkspaceId",

                # the properties below are optional
                guardrail_policies=["guardrailPolicies"],
                logging_level="loggingLevel",
                sns_topic_arns=["snsTopicArns"],
                user_role_required=False
            )
        """
        if __debug__:
            type_hints = typing.get_type_hints(
                CfnSlackChannelConfigurationProps.__init__)
            check_type(argname='argument configuration_name', value=
                configuration_name, expected_type=type_hints[
                'configuration_name'])
            check_type(argname='argument iam_role_arn', value=iam_role_arn,
                expected_type=type_hints['iam_role_arn'])
            check_type(argname='argument slack_channel_id', value=
                slack_channel_id, expected_type=type_hints['slack_channel_id'])
            check_type(argname='argument slack_workspace_id', value=
                slack_workspace_id, expected_type=type_hints[
                'slack_workspace_id'])
            check_type(argname='argument guardrail_policies', value=
                guardrail_policies, expected_type=type_hints[
                'guardrail_policies'])
            check_type(argname='argument logging_level', value=
                logging_level, expected_type=type_hints['logging_level'])
            check_type(argname='argument sns_topic_arns', value=
                sns_topic_arns, expected_type=type_hints['sns_topic_arns'])
            check_type(argname='argument user_role_required', value=
                user_role_required, expected_type=type_hints[
                'user_role_required'])
        # Required properties are stored unconditionally; optional ones only
        # when supplied, so absent keys are omitted from serialization.
        self._values: typing.Dict[str, typing.Any] = {'configuration_name':
            configuration_name, 'iam_role_arn': iam_role_arn,
            'slack_channel_id': slack_channel_id, 'slack_workspace_id':
            slack_workspace_id}
        if guardrail_policies is not None:
            self._values['guardrail_policies'] = guardrail_policies
        if logging_level is not None:
            self._values['logging_level'] = logging_level
        if sns_topic_arns is not None:
            self._values['sns_topic_arns'] = sns_topic_arns
        if user_role_required is not None:
            self._values['user_role_required'] = user_role_required

    @builtins.property
    def configuration_name(self) -> builtins.str:
        """The name of the configuration.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname
        """
        result = self._values.get('configuration_name')
        assert result is not None, "Required property 'configuration_name' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def iam_role_arn(self) -> builtins.str:
        """The ARN of the IAM role that defines the permissions for AWS Chatbot .

        This is a user-defined role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn
        """
        result = self._values.get('iam_role_arn')
        assert result is not None, "Required property 'iam_role_arn' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def slack_channel_id(self) -> builtins.str:
        """The ID of the Slack channel.

        To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid
        """
        result = self._values.get('slack_channel_id')
        assert result is not None, "Required property 'slack_channel_id' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def slack_workspace_id(self) -> builtins.str:
        """The ID of the Slack workspace authorized with AWS Chatbot .

        To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid
        """
        result = self._values.get('slack_workspace_id')
        assert result is not None, "Required property 'slack_workspace_id' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:
        """The list of IAM policy ARNs that are applied as channel guardrails.

        The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies
        """
        result = self._values.get('guardrail_policies')
        return typing.cast(typing.Optional[typing.List[builtins.str]], result)

    @builtins.property
    def logging_level(self) -> typing.Optional[builtins.str]:
        """Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.

        Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel
        """
        result = self._values.get('logging_level')
        return typing.cast(typing.Optional[builtins.str], result)

    @builtins.property
    def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:
        """The ARNs of the SNS topics that deliver notifications to AWS Chatbot .

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns
        """
        result = self._values.get('sns_topic_arns')
        return typing.cast(typing.Optional[typing.List[builtins.str]], result)

    @builtins.property
    def user_role_required(self) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:
        """Enables use of a user role requirement in your chat configuration.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired
        """
        result = self._values.get('user_role_required')
        return typing.cast(typing.Optional[typing.Union[builtins.bool,
            _IResolvable_da3f097b]], result)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        # Structural equality: same class and identical stored values.
        return isinstance(rhs, self.__class__) and rhs._values == self._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        return not rhs == self

    def __repr__(self) -> str:
        return 'CfnSlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +
            repr(v) for k, v in self._values.items())


@jsii.interface(jsii_type='aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration')
class ISlackChannelConfiguration(_IResource_c80c4260, _IGrantable_71c4f5de,
    _INotificationRuleTarget_faa3b79b,
    typing_extensions.Protocol):
    """Represents a Slack channel configuration."""

    @builtins.property
    @jsii.member(jsii_name='slackChannelConfigurationArn')
    def slack_channel_configuration_arn(self) -> builtins.str:
        """The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.

        :attribute: true
        """
        ...

    @builtins.property
    @jsii.member(jsii_name='slackChannelConfigurationName')
    def slack_channel_configuration_name(self) -> builtins.str:
        """The name of Slack channel configuration.

        :attribute: true
        """
        ...

    @builtins.property
    @jsii.member(jsii_name='role')
    def role(self) -> typing.Optional[_IRole_235f5d8e]:
        """The permission role of Slack channel configuration.

        :default: - A role will be created.

        :attribute: true
        """
        ...

    @jsii.member(jsii_name='addToRolePolicy')
    def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
        """Adds a statement to the IAM role.

        :param statement: -
        """
        ...

    @jsii.member(jsii_name='metric')
    def metric(self, metric_name: builtins.str, *,
        account: typing.Optional[builtins.str]=None,
        color: typing.Optional[builtins.str]=None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]]=None,
        label: typing.Optional[builtins.str]=None,
        period: typing.Optional[_Duration_4839e8c3]=None,
        region: typing.Optional[builtins.str]=None,
        statistic: typing.Optional[builtins.str]=None,
        unit: typing.Optional[_Unit_61bc6f70]=None) -> _Metric_e396a4dc:
        """Return the given named metric for this SlackChannelConfiguration.

        :param metric_name: -
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        """
        ...


class _ISlackChannelConfigurationProxy(jsii.proxy_for(_IResource_c80c4260),
    jsii.proxy_for(_IGrantable_71c4f5de), jsii.proxy_for(
    _INotificationRuleTarget_faa3b79b)):
    """Represents a Slack channel configuration."""
    # jsii type identifier used to resolve this proxy at runtime.
    __jsii_type__: typing.ClassVar[str
        ] = 'aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration'

    @builtins.property
    @jsii.member(jsii_name='slackChannelConfigurationArn')
    def slack_channel_configuration_arn(self) -> builtins.str:
        """The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.

        :attribute: true
        """
        return typing.cast(builtins.str, jsii.get(self,
            'slackChannelConfigurationArn'))

    @builtins.property
    @jsii.member(jsii_name='slackChannelConfigurationName')
    def slack_channel_configuration_name(self) -> builtins.str:
        """The name of Slack channel configuration.

        :attribute: true
        """
        return typing.cast(builtins.str, jsii.get(self,
            'slackChannelConfigurationName'))

    @builtins.property
    @jsii.member(jsii_name='role')
    def role(self) -> typing.Optional[_IRole_235f5d8e]:
        """The permission role of Slack channel configuration.

        :default: - A role will be created.

        :attribute: true
        """
        return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,
            'role'))

    @jsii.member(jsii_name='addToRolePolicy')
    def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
        """Adds a statement to the IAM role.

        :param statement: -
        """
        if __debug__:
            type_hints = typing.get_type_hints(ISlackChannelConfiguration.
                add_to_role_policy)
            check_type(argname='argument statement', value=statement,
                expected_type=type_hints['statement'])
        return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [
            statement]))

    @jsii.member(jsii_name='metric')
    def metric(self, metric_name: builtins.str, *,
        account: typing.Optional[builtins.str]=None,
        color: typing.Optional[builtins.str]=None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]]=None,
        label: typing.Optional[builtins.str]=None,
        period: typing.Optional[_Duration_4839e8c3]=None,
        region: typing.Optional[builtins.str]=None,
        statistic: typing.Optional[builtins.str]=None,
        unit: typing.Optional[_Unit_61bc6f70]=None) -> _Metric_e396a4dc:
        """Return the given named metric for this SlackChannelConfiguration.

        :param metric_name: -
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others.
        Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        """
        if __debug__:
            type_hints = typing.get_type_hints(ISlackChannelConfiguration.
                metric)
            check_type(argname='argument metric_name', value=metric_name,
                expected_type=type_hints['metric_name'])
        props = _MetricOptions_1788b62f(account=account, color=color,
            dimensions_map=dimensions_map, label=label, period=period,
            region=region, statistic=statistic, unit=unit)
        return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [
            metric_name, props]))


# NOTE(review): a definition was elided here by the dataset's mask token —
# restore it from the generated source before using this file.
<mask token>


@jsii.enum(jsii_type='aws-cdk-lib.aws_chatbot.LoggingLevel')
class LoggingLevel(enum.Enum):
    """Logging levels include ERROR, INFO, or NONE."""
    ERROR = 'ERROR'
    """ERROR."""
    INFO = 'INFO'
    """INFO."""
    NONE = 'NONE'
    """NONE."""


@jsii.implements(ISlackChannelConfiguration)
class SlackChannelConfiguration(_Resource_45bc6135, metaclass=jsii.JSIIMeta,
    jsii_type='aws-cdk-lib.aws_chatbot.SlackChannelConfiguration'):
    """A new Slack channel configuration.

    :exampleMetadata: infused

    Example::

        import aws_cdk.aws_chatbot as chatbot

        # project: codebuild.Project


        target = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
            slack_channel_configuration_name="YOUR_CHANNEL_NAME",
            slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
            slack_channel_id="YOUR_SLACK_CHANNEL_ID"
        )

        rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target)
    """

    def __init__(self, scope: constructs.Construct, id: builtins.str, *,
        slack_channel_configuration_name: builtins.str,
        slack_channel_id: builtins.str, slack_workspace_id: builtins.str,
        logging_level: typing.Optional[LoggingLevel]=None,
        log_retention: typing.Optional[_RetentionDays_070f99f0]=None,
        log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14]=None,
        log_retention_role: typing.Optional[_IRole_235f5d8e]=None,
        notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]]=None,
        role: typing.Optional[_IRole_235f5d8e]=None) -> None:
        """Create a new Slack channel configuration construct.

        :param scope: -
        :param id: -
        :param slack_channel_configuration_name: The name of Slack channel configuration.
        :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.
        :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.
        :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE
        :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE
        :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.
        :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.
        :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None
        :param role: The permission role of Slack channel configuration. Default: - A role will be created.
        """
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfiguration.
                __init__)
            check_type(argname='argument scope', value=scope,
                expected_type=type_hints['scope'])
            check_type(argname='argument id', value=id, expected_type=
                type_hints['id'])
        # Keyword arguments are bundled into the generated props struct
        # before crossing into the jsii kernel.
        props = SlackChannelConfigurationProps(slack_channel_configuration_name
            =slack_channel_configuration_name, slack_channel_id=
            slack_channel_id, slack_workspace_id=slack_workspace_id,
            logging_level=logging_level, log_retention=log_retention,
            log_retention_retry_options=log_retention_retry_options,
            log_retention_role=log_retention_role, notification_topics=
            notification_topics, role=role)
        jsii.create(self.__class__, self, [scope, id, props])

    @jsii.member(jsii_name='fromSlackChannelConfigurationArn')
    @builtins.classmethod
    def from_slack_channel_configuration_arn(cls, scope: constructs.Construct,
        id: builtins.str,
        slack_channel_configuration_arn: builtins.str) -> ISlackChannelConfiguration:
        """Import an existing Slack channel configuration provided an ARN.

        :param scope: The parent creating construct.
        :param id: The construct's name.
        :param slack_channel_configuration_arn: configuration ARN (i.e. arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).

        :return: a reference to the existing Slack channel configuration
        """
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfiguration.
                from_slack_channel_configuration_arn)
            check_type(argname='argument scope', value=scope,
                expected_type=type_hints['scope'])
            check_type(argname='argument id', value=id, expected_type=
                type_hints['id'])
            check_type(argname='argument slack_channel_configuration_arn',
                value=slack_channel_configuration_arn, expected_type=
                type_hints['slack_channel_configuration_arn'])
        return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls,
            'fromSlackChannelConfigurationArn', [scope, id,
            slack_channel_configuration_arn]))

    @jsii.member(jsii_name='metricAll')
    @builtins.classmethod
    def metric_all(cls, metric_name: builtins.str, *,
        account: typing.Optional[builtins.str]=None,
        color: typing.Optional[builtins.str]=None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]]=None,
        label: typing.Optional[builtins.str]=None,
        period: typing.Optional[_Duration_4839e8c3]=None,
        region: typing.Optional[builtins.str]=None,
        statistic: typing.Optional[builtins.str]=None,
        unit: typing.Optional[_Unit_61bc6f70]=None) -> _Metric_e396a4dc:
        """Return the given named metric for All SlackChannelConfigurations.

        :param metric_name: -
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        """
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfiguration.
                metric_all)
            check_type(argname='argument metric_name', value=metric_name,
                expected_type=type_hints['metric_name'])
        props = _MetricOptions_1788b62f(account=account, color=color,
            dimensions_map=dimensions_map, label=label, period=period,
            region=region, statistic=statistic, unit=unit)
        return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, 'metricAll',
            [metric_name, props]))

    @jsii.member(jsii_name='addNotificationTopic')
    def add_notification_topic(self, notification_topic: _ITopic_9eca4852
        ) -> None:
        """Adds a SNS topic that deliver notifications to AWS Chatbot.

        :param notification_topic: -
        """
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfiguration.
                add_notification_topic)
            check_type(argname='argument notification_topic', value=
                notification_topic, expected_type=type_hints[
                'notification_topic'])
        return typing.cast(None, jsii.invoke(self, 'addNotificationTopic',
            [notification_topic]))

    @jsii.member(jsii_name='addToRolePolicy')
    def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:
        """Adds extra permission to iam-role of Slack channel configuration.

        :param statement: -
        """
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfiguration.
                add_to_role_policy)
            check_type(argname='argument statement', value=statement,
                expected_type=type_hints['statement'])
        return typing.cast(None, jsii.invoke(self, 'addToRolePolicy', [
            statement]))

    @jsii.member(jsii_name='bindAsNotificationRuleTarget')
    def bind_as_notification_rule_target(self, _scope: constructs.Construct
        ) -> _NotificationRuleTargetConfig_ea27e095:
        """Returns a target configuration for notification rule.

        :param _scope: -
        """
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfiguration.
                bind_as_notification_rule_target)
            check_type(argname='argument _scope', value=_scope,
                expected_type=type_hints['_scope'])
        return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.
            invoke(self, 'bindAsNotificationRuleTarget', [_scope]))

    @jsii.member(jsii_name='metric')
    def metric(self, metric_name: builtins.str, *,
        account: typing.Optional[builtins.str]=None,
        color: typing.Optional[builtins.str]=None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]]=None,
        label: typing.Optional[builtins.str]=None,
        period: typing.Optional[_Duration_4839e8c3]=None,
        region: typing.Optional[builtins.str]=None,
        statistic: typing.Optional[builtins.str]=None,
        unit: typing.Optional[_Unit_61bc6f70]=None) -> _Metric_e396a4dc:
        """Return the given named metric for this SlackChannelConfiguration.

        :param metric_name: -
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        """
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfiguration.metric
                )
            check_type(argname='argument metric_name', value=metric_name,
                expected_type=type_hints['metric_name'])
        props = _MetricOptions_1788b62f(account=account, color=color,
            dimensions_map=dimensions_map, label=label, period=period,
            region=region, statistic=statistic, unit=unit)
        return typing.cast(_Metric_e396a4dc, jsii.invoke(self, 'metric', [
            metric_name, props]))

    @builtins.property
    @jsii.member(jsii_name='grantPrincipal')
    def grant_principal(self) -> _IPrincipal_539bb2fd:
        """The principal to grant permissions to."""
        return typing.cast(_IPrincipal_539bb2fd, jsii.get(self,
            'grantPrincipal'))

    @builtins.property
    @jsii.member(jsii_name='slackChannelConfigurationArn')
    def slack_channel_configuration_arn(self) -> builtins.str:
        """The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}."""
        return typing.cast(builtins.str, jsii.get(self,
            'slackChannelConfigurationArn'))

    @builtins.property
    @jsii.member(jsii_name='slackChannelConfigurationName')
    def slack_channel_configuration_name(self) -> builtins.str:
        """The name of Slack channel configuration."""
        return typing.cast(builtins.str, jsii.get(self,
            'slackChannelConfigurationName'))

    @builtins.property
    @jsii.member(jsii_name='role')
    def role(self) -> typing.Optional[_IRole_235f5d8e]:
        """The permission role of Slack channel configuration."""
        return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self,
            'role'))


@jsii.data_type(jsii_type=
    'aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps',
    jsii_struct_bases=[], name_mapping={'slack_channel_configuration_name':
    'slackChannelConfigurationName', 'slack_channel_id': 'slackChannelId',
    'slack_workspace_id': 'slackWorkspaceId', 'logging_level':
    'loggingLevel', 'log_retention': 'logRetention',
    'log_retention_retry_options': 'logRetentionRetryOptions',
    'log_retention_role': 'logRetentionRole', 'notification_topics':
    'notificationTopics', 'role': 'role'})
class SlackChannelConfigurationProps:

    def __init__(self, *, slack_channel_configuration_name: builtins.str,
        slack_channel_id: builtins.str, slack_workspace_id: builtins.str,
        logging_level: typing.Optional[LoggingLevel]=None,
        log_retention: typing.Optional[_RetentionDays_070f99f0]=None,
        log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14]=None,
        log_retention_role: typing.Optional[_IRole_235f5d8e]=None,
        notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]]=None,
        role: typing.Optional[_IRole_235f5d8e]=None) -> None:
        """Properties for a new Slack channel configuration.

        :param slack_channel_configuration_name: The name of Slack channel configuration.
        :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.
        :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.
        :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE
        :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE
        :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.
        :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.
        :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None
        :param role: The permission role of Slack channel configuration. Default: - A role will be created.

        :exampleMetadata: infused

        Example::

            import aws_cdk.aws_chatbot as chatbot

            # project: codebuild.Project


            target = chatbot.SlackChannelConfiguration(self, "MySlackChannel",
                slack_channel_configuration_name="YOUR_CHANNEL_NAME",
                slack_workspace_id="YOUR_SLACK_WORKSPACE_ID",
                slack_channel_id="YOUR_SLACK_CHANNEL_ID"
            )

            rule = project.notify_on_build_succeeded("NotifyOnBuildSucceeded", target)
        """
        # A plain dict passed for the retry options is promoted to the
        # generated struct type before validation.
        if isinstance(log_retention_retry_options, dict):
            log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**
                log_retention_retry_options)
        if __debug__:
            type_hints = typing.get_type_hints(SlackChannelConfigurationProps
                .__init__)
            check_type(argname='argument slack_channel_configuration_name',
                value=slack_channel_configuration_name, expected_type=
                type_hints['slack_channel_configuration_name'])
            check_type(argname='argument slack_channel_id', value=
                slack_channel_id, expected_type=type_hints['slack_channel_id'])
            check_type(argname='argument slack_workspace_id', value=
                slack_workspace_id, expected_type=type_hints[
                'slack_workspace_id'])
            check_type(argname='argument logging_level', value=
                logging_level, expected_type=type_hints['logging_level'])
            check_type(argname='argument log_retention', value=
                log_retention, expected_type=type_hints['log_retention'])
            check_type(argname='argument log_retention_retry_options',
                value=log_retention_retry_options, expected_type=type_hints
                ['log_retention_retry_options'])
            check_type(argname='argument log_retention_role', value=
                log_retention_role, expected_type=type_hints[
                'log_retention_role'])
            check_type(argname='argument notification_topics', value=
                notification_topics, expected_type=type_hints[
                'notification_topics'])
            check_type(argname='argument role', value=role, expected_type=
                type_hints['role'])
        # Required properties are stored unconditionally; optional ones only
        # when supplied, so absent keys are omitted from serialization.
        self._values: typing.Dict[str, typing.Any] = {
            'slack_channel_configuration_name':
            slack_channel_configuration_name, 'slack_channel_id':
            slack_channel_id, 'slack_workspace_id': slack_workspace_id}
        if logging_level is not None:
            self._values['logging_level'] = logging_level
        if log_retention is not None:
            self._values['log_retention'] = log_retention
        if log_retention_retry_options is not None:
            self._values['log_retention_retry_options'
                ] = log_retention_retry_options
        if log_retention_role is not None:
            self._values['log_retention_role'] = log_retention_role
        if notification_topics is not None:
            self._values['notification_topics'] = notification_topics
        if role is not None:
            self._values['role'] = role

    @builtins.property
    def slack_channel_configuration_name(self) -> builtins.str:
        """The name of Slack channel configuration."""
        result = self._values.get('slack_channel_configuration_name')
        assert result is not None, "Required property 'slack_channel_configuration_name' is missing"
        return typing.cast(builtins.str, result)

    @builtins.property
    def slack_channel_id(self) -> builtins.str:
        """The ID of the Slack channel.

        To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.
        The channel ID is the 9-character string at the end of the URL.
For example, ABCBBLZZZ.\n \"\"\"\n result = self._values.get('slack_channel_id')\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) ->builtins.str:\n \"\"\"The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n \"\"\"\n result = self._values.get('slack_workspace_id')\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) ->typing.Optional[LoggingLevel]:\n \"\"\"Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n \"\"\"\n result = self._values.get('logging_level')\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) ->typing.Optional[_RetentionDays_070f99f0]:\n \"\"\"The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. 
To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n \"\"\"\n result = self._values.get('log_retention')\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(self) ->typing.Optional[\n _LogRetentionRetryOptions_62d80a14]:\n \"\"\"When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n \"\"\"\n result = self._values.get('log_retention_retry_options')\n return typing.cast(typing.Optional[\n _LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n \"\"\"\n result = self._values.get('log_retention_role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) ->typing.Optional[typing.List[\n _ITopic_9eca4852]]:\n \"\"\"The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n \"\"\"\n result = self._values.get('notification_topics')\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]],\n result)\n\n @builtins.property\n def role(self) ->typing.Optional[_IRole_235f5d8e]:\n \"\"\"The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n \"\"\"\n result = self._values.get('role')\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) ->builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) ->builtins.bool:\n return not rhs == self\n\n def __repr__(self) ->str:\n return 
'SlackChannelConfigurationProps(%s)' % ', '.join(k + '=' +\n repr(v) for k, v in self._values.items())\n\n\n<mask token>\n", "step-5": "'''\n# AWS::Chatbot Construct Library\n\nAWS Chatbot is an AWS service that enables DevOps and software development teams to use Slack chat rooms to monitor and respond to operational events in their AWS Cloud. AWS Chatbot processes AWS service notifications from Amazon Simple Notification Service (Amazon SNS), and forwards them to Slack chat rooms so teams can analyze and act on them immediately, regardless of location.\n\nThis module is part of the [AWS Cloud Development Kit](https://github.com/aws/aws-cdk) project.\n\n```python\nimport aws_cdk.aws_chatbot as chatbot\nimport aws_cdk.aws_sns as sns\nimport aws_cdk.aws_iam as iam\n\n\nslack_channel = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n)\n\nslack_channel.add_to_role_policy(iam.PolicyStatement(\n effect=iam.Effect.ALLOW,\n actions=[\"s3:GetObject\"\n ],\n resources=[\"arn:aws:s3:::abc/xyz/123.txt\"]\n))\n\nslack_channel.add_notification_topic(sns.Topic(self, \"MyTopic\"))\n```\n\n## Log Group\n\nSlack channel configuration automatically create a log group with the name `/aws/chatbot/<configuration-name>` in `us-east-1` upon first execution with\nlog data set to never expire.\n\nThe `logRetention` property can be used to set a different expiration period. A log group will be created if not already exists.\nIf the log group already exists, it's expiration will be configured to the value specified in this construct (never expire, by default).\n\nBy default, CDK uses the AWS SDK retry options when interacting with the log group. 
The `logRetentionRetryOptions` property\nallows you to customize the maximum number of retries and base backoff duration.\n\n*Note* that, if `logRetention` is set, a [CloudFormation custom\nresource](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cfn-customresource.html) is added\nto the stack that pre-creates the log group as part of the stack deployment, if it already doesn't exist, and sets the\ncorrect log retention period (never expire, by default).\n'''\nimport abc\nimport builtins\nimport datetime\nimport enum\nimport typing\n\nimport jsii\nimport publication\nimport typing_extensions\n\nfrom typeguard import check_type\n\nfrom .._jsii import *\n\nimport constructs\nfrom .. import (\n CfnResource as _CfnResource_9df397a6,\n Duration as _Duration_4839e8c3,\n IInspectable as _IInspectable_c2943556,\n IResolvable as _IResolvable_da3f097b,\n IResource as _IResource_c80c4260,\n Resource as _Resource_45bc6135,\n TreeInspector as _TreeInspector_488e0dd5,\n)\nfrom ..aws_cloudwatch import (\n Metric as _Metric_e396a4dc,\n MetricOptions as _MetricOptions_1788b62f,\n Unit as _Unit_61bc6f70,\n)\nfrom ..aws_codestarnotifications import (\n INotificationRuleTarget as _INotificationRuleTarget_faa3b79b,\n NotificationRuleTargetConfig as _NotificationRuleTargetConfig_ea27e095,\n)\nfrom ..aws_iam import (\n IGrantable as _IGrantable_71c4f5de,\n IPrincipal as _IPrincipal_539bb2fd,\n IRole as _IRole_235f5d8e,\n PolicyStatement as _PolicyStatement_0fe33853,\n)\nfrom ..aws_logs import (\n LogRetentionRetryOptions as _LogRetentionRetryOptions_62d80a14,\n RetentionDays as _RetentionDays_070f99f0,\n)\nfrom ..aws_sns import ITopic as _ITopic_9eca4852\n\n\[email protected](_IInspectable_c2943556)\nclass CfnSlackChannelConfiguration(\n _CfnResource_9df397a6,\n metaclass=jsii.JSIIMeta,\n jsii_type=\"aws-cdk-lib.aws_chatbot.CfnSlackChannelConfiguration\",\n):\n '''A CloudFormation ``AWS::Chatbot::SlackChannelConfiguration``.\n\n The 
``AWS::Chatbot::SlackChannelConfiguration`` resource configures a Slack channel to allow users to use AWS Chatbot with AWS CloudFormation templates.\n\n This resource requires some setup to be done in the AWS Chatbot console. To provide the required Slack workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console, then copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :cloudformationResource: AWS::Chatbot::SlackChannelConfiguration\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration = chatbot.CfnSlackChannelConfiguration(self, \"MyCfnSlackChannelConfiguration\",\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n '''\n\n def __init__(\n self,\n scope: constructs.Construct,\n id: builtins.str,\n *,\n configuration_name: builtins.str,\n iam_role_arn: builtins.str,\n slack_channel_id: builtins.str,\n slack_workspace_id: builtins.str,\n guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,\n logging_level: typing.Optional[builtins.str] = None,\n sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,\n user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = 
None,\n ) -> None:\n '''Create a new ``AWS::Chatbot::SlackChannelConfiguration``.\n\n :param scope: - scope in which this resource is defined.\n :param id: - scoped id of the resource.\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. 
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.__init__)\n check_type(argname=\"argument scope\", value=scope, expected_type=type_hints[\"scope\"])\n check_type(argname=\"argument id\", value=id, expected_type=type_hints[\"id\"])\n props = CfnSlackChannelConfigurationProps(\n configuration_name=configuration_name,\n iam_role_arn=iam_role_arn,\n slack_channel_id=slack_channel_id,\n slack_workspace_id=slack_workspace_id,\n guardrail_policies=guardrail_policies,\n logging_level=logging_level,\n sns_topic_arns=sns_topic_arns,\n user_role_required=user_role_required,\n )\n\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name=\"inspect\")\n def inspect(self, inspector: _TreeInspector_488e0dd5) -> None:\n '''Examines the CloudFormation resource and discloses attributes.\n\n :param inspector: - tree inspector to collect and process attributes.\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration.inspect)\n check_type(argname=\"argument inspector\", value=inspector, expected_type=type_hints[\"inspector\"])\n return typing.cast(None, jsii.invoke(self, \"inspect\", [inspector]))\n\n @jsii.member(jsii_name=\"renderProperties\")\n def _render_properties(\n self,\n props: typing.Mapping[builtins.str, typing.Any],\n ) -> typing.Mapping[builtins.str, typing.Any]:\n '''\n :param props: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfiguration._render_properties)\n check_type(argname=\"argument props\", value=props, expected_type=type_hints[\"props\"])\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, \"renderProperties\", [props]))\n\n @jsii.python.classproperty # 
type: ignore[misc]\n @jsii.member(jsii_name=\"CFN_RESOURCE_TYPE_NAME\")\n def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:\n '''The CloudFormation resource type name for this resource class.'''\n return typing.cast(builtins.str, jsii.sget(cls, \"CFN_RESOURCE_TYPE_NAME\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"attrArn\")\n def attr_arn(self) -> builtins.str:\n '''\n :cloudformationAttribute: Arn\n '''\n return typing.cast(builtins.str, jsii.get(self, \"attrArn\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"cfnProperties\")\n def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:\n return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, \"cfnProperties\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"configurationName\")\n def configuration_name(self) -> builtins.str:\n '''The name of the configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n '''\n return typing.cast(builtins.str, jsii.get(self, \"configurationName\"))\n\n @configuration_name.setter\n def configuration_name(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"configuration_name\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"configurationName\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"iamRoleArn\")\n def iam_role_arn(self) -> builtins.str:\n '''The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n '''\n return typing.cast(builtins.str, jsii.get(self, \"iamRoleArn\"))\n\n @iam_role_arn.setter\n def iam_role_arn(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"iam_role_arn\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"iamRoleArn\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelId\")\n def slack_channel_id(self) -> builtins.str:\n '''The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelId\"))\n\n @slack_channel_id.setter\n def slack_channel_id(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"slack_channel_id\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"slackChannelId\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackWorkspaceId\")\n def slack_workspace_id(self) -> builtins.str:\n '''The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. 
Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackWorkspaceId\"))\n\n @slack_workspace_id.setter\n def slack_workspace_id(self, value: builtins.str) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"slack_workspace_id\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"slackWorkspaceId\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"guardrailPolicies\")\n def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n '''\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, \"guardrailPolicies\"))\n\n @guardrail_policies.setter\n def guardrail_policies(\n self,\n value: typing.Optional[typing.List[builtins.str]],\n ) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"guardrail_policies\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"guardrailPolicies\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"loggingLevel\")\n def logging_level(self) -> 
typing.Optional[builtins.str]:\n '''Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n '''\n return typing.cast(typing.Optional[builtins.str], jsii.get(self, \"loggingLevel\"))\n\n @logging_level.setter\n def logging_level(self, value: typing.Optional[builtins.str]) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"logging_level\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"loggingLevel\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"snsTopicArns\")\n def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n '''\n return typing.cast(typing.Optional[typing.List[builtins.str]], jsii.get(self, \"snsTopicArns\"))\n\n @sns_topic_arns.setter\n def sns_topic_arns(self, value: typing.Optional[typing.List[builtins.str]]) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"sns_topic_arns\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"snsTopicArns\", value)\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"userRoleRequired\")\n def user_role_required(\n self,\n ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:\n '''Enables use of a user role requirement in your chat configuration.\n\n 
:link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n '''\n return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], jsii.get(self, \"userRoleRequired\"))\n\n @user_role_required.setter\n def user_role_required(\n self,\n value: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]],\n ) -> None:\n if __debug__:\n type_hints = typing.get_type_hints(getattr(CfnSlackChannelConfiguration, \"user_role_required\").fset)\n check_type(argname=\"argument value\", value=value, expected_type=type_hints[\"value\"])\n jsii.set(self, \"userRoleRequired\", value)\n\n\[email protected]_type(\n jsii_type=\"aws-cdk-lib.aws_chatbot.CfnSlackChannelConfigurationProps\",\n jsii_struct_bases=[],\n name_mapping={\n \"configuration_name\": \"configurationName\",\n \"iam_role_arn\": \"iamRoleArn\",\n \"slack_channel_id\": \"slackChannelId\",\n \"slack_workspace_id\": \"slackWorkspaceId\",\n \"guardrail_policies\": \"guardrailPolicies\",\n \"logging_level\": \"loggingLevel\",\n \"sns_topic_arns\": \"snsTopicArns\",\n \"user_role_required\": \"userRoleRequired\",\n },\n)\nclass CfnSlackChannelConfigurationProps:\n def __init__(\n self,\n *,\n configuration_name: builtins.str,\n iam_role_arn: builtins.str,\n slack_channel_id: builtins.str,\n slack_workspace_id: builtins.str,\n guardrail_policies: typing.Optional[typing.Sequence[builtins.str]] = None,\n logging_level: typing.Optional[builtins.str] = None,\n sns_topic_arns: typing.Optional[typing.Sequence[builtins.str]] = None,\n user_role_required: typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]] = None,\n ) -> None:\n '''Properties for defining a ``CfnSlackChannelConfiguration``.\n\n :param configuration_name: The name of the configuration.\n :param iam_role_arn: The ARN of the IAM role that defines the permissions for AWS Chatbot . 
This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot . To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n :param guardrail_policies: The list of IAM policy ARNs that are applied as channel guardrails. The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. 
Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n :param sns_topic_arns: The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n :param user_role_required: Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html\n :exampleMetadata: fixture=_generated\n\n Example::\n\n # The code below shows an example of how to instantiate this type.\n # The values are placeholders you should change.\n from aws_cdk import aws_chatbot as chatbot\n \n cfn_slack_channel_configuration_props = chatbot.CfnSlackChannelConfigurationProps(\n configuration_name=\"configurationName\",\n iam_role_arn=\"iamRoleArn\",\n slack_channel_id=\"slackChannelId\",\n slack_workspace_id=\"slackWorkspaceId\",\n \n # the properties below are optional\n guardrail_policies=[\"guardrailPolicies\"],\n logging_level=\"loggingLevel\",\n sns_topic_arns=[\"snsTopicArns\"],\n user_role_required=False\n )\n '''\n if __debug__:\n type_hints = typing.get_type_hints(CfnSlackChannelConfigurationProps.__init__)\n check_type(argname=\"argument configuration_name\", value=configuration_name, expected_type=type_hints[\"configuration_name\"])\n check_type(argname=\"argument iam_role_arn\", value=iam_role_arn, expected_type=type_hints[\"iam_role_arn\"])\n check_type(argname=\"argument slack_channel_id\", value=slack_channel_id, expected_type=type_hints[\"slack_channel_id\"])\n check_type(argname=\"argument slack_workspace_id\", value=slack_workspace_id, expected_type=type_hints[\"slack_workspace_id\"])\n check_type(argname=\"argument guardrail_policies\", value=guardrail_policies, expected_type=type_hints[\"guardrail_policies\"])\n check_type(argname=\"argument logging_level\", value=logging_level, expected_type=type_hints[\"logging_level\"])\n check_type(argname=\"argument sns_topic_arns\", value=sns_topic_arns, expected_type=type_hints[\"sns_topic_arns\"])\n 
check_type(argname=\"argument user_role_required\", value=user_role_required, expected_type=type_hints[\"user_role_required\"])\n self._values: typing.Dict[str, typing.Any] = {\n \"configuration_name\": configuration_name,\n \"iam_role_arn\": iam_role_arn,\n \"slack_channel_id\": slack_channel_id,\n \"slack_workspace_id\": slack_workspace_id,\n }\n if guardrail_policies is not None:\n self._values[\"guardrail_policies\"] = guardrail_policies\n if logging_level is not None:\n self._values[\"logging_level\"] = logging_level\n if sns_topic_arns is not None:\n self._values[\"sns_topic_arns\"] = sns_topic_arns\n if user_role_required is not None:\n self._values[\"user_role_required\"] = user_role_required\n\n @builtins.property\n def configuration_name(self) -> builtins.str:\n '''The name of the configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-configurationname\n '''\n result = self._values.get(\"configuration_name\")\n assert result is not None, \"Required property 'configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def iam_role_arn(self) -> builtins.str:\n '''The ARN of the IAM role that defines the permissions for AWS Chatbot .\n\n This is a user-definworked role that AWS Chatbot will assume. This is not the service-linked role. 
For more information, see `IAM Policies for AWS Chatbot <https://docs.aws.amazon.com/chatbot/latest/adminguide/chatbot-iam-policies.html>`_ .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-iamrolearn\n '''\n result = self._values.get(\"iam_role_arn\")\n assert result is not None, \"Required property 'iam_role_arn' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) -> builtins.str:\n '''The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ``ABCBBLZZZ`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackchannelid\n '''\n result = self._values.get(\"slack_channel_id\")\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) -> builtins.str:\n '''The ID of the Slack workspace authorized with AWS Chatbot .\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. 
For more details, see steps 1-4 in `Setting Up AWS Chatbot with Slack <https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro>`_ in the *AWS Chatbot User Guide* .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-slackworkspaceid\n '''\n result = self._values.get(\"slack_workspace_id\")\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def guardrail_policies(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The list of IAM policy ARNs that are applied as channel guardrails.\n\n The AWS managed 'AdministratorAccess' policy is applied as a default if this is not set.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-guardrailpolicies\n '''\n result = self._values.get(\"guardrail_policies\")\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def logging_level(self) -> typing.Optional[builtins.str]:\n '''Specifies the logging level for this configuration. 
This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n Logging levels include ``ERROR`` , ``INFO`` , or ``NONE`` .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-logginglevel\n '''\n result = self._values.get(\"logging_level\")\n return typing.cast(typing.Optional[builtins.str], result)\n\n @builtins.property\n def sns_topic_arns(self) -> typing.Optional[typing.List[builtins.str]]:\n '''The ARNs of the SNS topics that deliver notifications to AWS Chatbot .\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-snstopicarns\n '''\n result = self._values.get(\"sns_topic_arns\")\n return typing.cast(typing.Optional[typing.List[builtins.str]], result)\n\n @builtins.property\n def user_role_required(\n self,\n ) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]]:\n '''Enables use of a user role requirement in your chat configuration.\n\n :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-chatbot-slackchannelconfiguration.html#cfn-chatbot-slackchannelconfiguration-userrolerequired\n '''\n result = self._values.get(\"user_role_required\")\n return typing.cast(typing.Optional[typing.Union[builtins.bool, _IResolvable_da3f097b]], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"CfnSlackChannelConfigurationProps(%s)\" % \", \".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\[email protected](jsii_type=\"aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration\")\nclass ISlackChannelConfiguration(\n _IResource_c80c4260,\n _IGrantable_71c4f5de,\n 
_INotificationRuleTarget_faa3b79b,\n typing_extensions.Protocol,\n):\n '''Represents a Slack channel configuration.'''\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationArn\")\n def slack_channel_configuration_arn(self) -> builtins.str:\n '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n '''\n ...\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationName\")\n def slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.\n\n :attribute: true\n '''\n ...\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"role\")\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n '''\n ...\n\n @jsii.member(jsii_name=\"addToRolePolicy\")\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:\n '''Adds a statement to the IAM role.\n\n :param statement: -\n '''\n ...\n\n @jsii.member(jsii_name=\"metric\")\n def metric(\n self,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. 
'#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n '''\n ...\n\n\nclass _ISlackChannelConfigurationProxy(\n jsii.proxy_for(_IResource_c80c4260), # type: ignore[misc]\n jsii.proxy_for(_IGrantable_71c4f5de), # type: ignore[misc]\n jsii.proxy_for(_INotificationRuleTarget_faa3b79b), # type: ignore[misc]\n):\n '''Represents a Slack channel configuration.'''\n\n __jsii_type__: typing.ClassVar[str] = \"aws-cdk-lib.aws_chatbot.ISlackChannelConfiguration\"\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationArn\")\n def slack_channel_configuration_arn(self) -> builtins.str:\n '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.\n\n :attribute: true\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationArn\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationName\")\n def slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.\n\n :attribute: true\n '''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationName\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"role\")\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n\n :attribute: true\n '''\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, \"role\"))\n\n @jsii.member(jsii_name=\"addToRolePolicy\")\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:\n '''Adds a statement to the IAM role.\n\n :param statement: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.add_to_role_policy)\n check_type(argname=\"argument statement\", value=statement, expected_type=type_hints[\"statement\"])\n return typing.cast(None, jsii.invoke(self, 
\"addToRolePolicy\", [statement]))\n\n @jsii.member(jsii_name=\"metric\")\n def metric(\n self,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. 
Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n '''\n if __debug__:\n type_hints = typing.get_type_hints(ISlackChannelConfiguration.metric)\n check_type(argname=\"argument metric_name\", value=metric_name, expected_type=type_hints[\"metric_name\"])\n props = _MetricOptions_1788b62f(\n account=account,\n color=color,\n dimensions_map=dimensions_map,\n label=label,\n period=period,\n region=region,\n statistic=statistic,\n unit=unit,\n )\n\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, \"metric\", [metric_name, props]))\n\n# Adding a \"__jsii_proxy_class__(): typing.Type\" function to the interface\ntyping.cast(typing.Any, ISlackChannelConfiguration).__jsii_proxy_class__ = lambda : _ISlackChannelConfigurationProxy\n\n\[email protected](jsii_type=\"aws-cdk-lib.aws_chatbot.LoggingLevel\")\nclass LoggingLevel(enum.Enum):\n '''Logging levels include ERROR, INFO, or NONE.'''\n\n ERROR = \"ERROR\"\n '''ERROR.'''\n INFO = \"INFO\"\n '''INFO.'''\n NONE = \"NONE\"\n '''NONE.'''\n\n\[email protected](ISlackChannelConfiguration)\nclass SlackChannelConfiguration(\n _Resource_45bc6135,\n metaclass=jsii.JSIIMeta,\n jsii_type=\"aws-cdk-lib.aws_chatbot.SlackChannelConfiguration\",\n):\n '''A new Slack channel configuration.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = 
chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n '''\n\n def __init__(\n self,\n scope: constructs.Construct,\n id: builtins.str,\n *,\n slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str,\n slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel] = None,\n log_retention: typing.Optional[_RetentionDays_070f99f0] = None,\n log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,\n log_retention_role: typing.Optional[_IRole_235f5d8e] = None,\n notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,\n role: typing.Optional[_IRole_235f5d8e] = None,\n ) -> None:\n '''\n :param scope: -\n :param id: -\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. 
To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. Default: - A role will be created.\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.__init__)\n check_type(argname=\"argument scope\", value=scope, expected_type=type_hints[\"scope\"])\n check_type(argname=\"argument id\", value=id, expected_type=type_hints[\"id\"])\n props = SlackChannelConfigurationProps(\n slack_channel_configuration_name=slack_channel_configuration_name,\n slack_channel_id=slack_channel_id,\n slack_workspace_id=slack_workspace_id,\n logging_level=logging_level,\n log_retention=log_retention,\n log_retention_retry_options=log_retention_retry_options,\n log_retention_role=log_retention_role,\n notification_topics=notification_topics,\n role=role,\n )\n\n jsii.create(self.__class__, self, [scope, id, props])\n\n @jsii.member(jsii_name=\"fromSlackChannelConfigurationArn\") # type: ignore[misc]\n @builtins.classmethod\n def from_slack_channel_configuration_arn(\n cls,\n scope: constructs.Construct,\n id: builtins.str,\n slack_channel_configuration_arn: builtins.str,\n ) -> ISlackChannelConfiguration:\n '''Import an existing Slack channel configuration provided an ARN.\n\n :param scope: The parent creating construct.\n :param id: The construct's name.\n :param slack_channel_configuration_arn: configuration ARN (i.e. 
arn:aws:chatbot::1234567890:chat-configuration/slack-channel/my-slack).\n\n :return: a reference to the existing Slack channel configuration\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.from_slack_channel_configuration_arn)\n check_type(argname=\"argument scope\", value=scope, expected_type=type_hints[\"scope\"])\n check_type(argname=\"argument id\", value=id, expected_type=type_hints[\"id\"])\n check_type(argname=\"argument slack_channel_configuration_arn\", value=slack_channel_configuration_arn, expected_type=type_hints[\"slack_channel_configuration_arn\"])\n return typing.cast(ISlackChannelConfiguration, jsii.sinvoke(cls, \"fromSlackChannelConfigurationArn\", [scope, id, slack_channel_configuration_arn]))\n\n @jsii.member(jsii_name=\"metricAll\") # type: ignore[misc]\n @builtins.classmethod\n def metric_all(\n cls,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for All SlackChannelConfigurations.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. 
You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. 
Default: - All metric datums in the given metric stream\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric_all)\n check_type(argname=\"argument metric_name\", value=metric_name, expected_type=type_hints[\"metric_name\"])\n props = _MetricOptions_1788b62f(\n account=account,\n color=color,\n dimensions_map=dimensions_map,\n label=label,\n period=period,\n region=region,\n statistic=statistic,\n unit=unit,\n )\n\n return typing.cast(_Metric_e396a4dc, jsii.sinvoke(cls, \"metricAll\", [metric_name, props]))\n\n @jsii.member(jsii_name=\"addNotificationTopic\")\n def add_notification_topic(self, notification_topic: _ITopic_9eca4852) -> None:\n '''Adds a SNS topic that deliver notifications to AWS Chatbot.\n\n :param notification_topic: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.add_notification_topic)\n check_type(argname=\"argument notification_topic\", value=notification_topic, expected_type=type_hints[\"notification_topic\"])\n return typing.cast(None, jsii.invoke(self, \"addNotificationTopic\", [notification_topic]))\n\n @jsii.member(jsii_name=\"addToRolePolicy\")\n def add_to_role_policy(self, statement: _PolicyStatement_0fe33853) -> None:\n '''Adds extra permission to iam-role of Slack channel configuration.\n\n :param statement: -\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.add_to_role_policy)\n check_type(argname=\"argument statement\", value=statement, expected_type=type_hints[\"statement\"])\n return typing.cast(None, jsii.invoke(self, \"addToRolePolicy\", [statement]))\n\n @jsii.member(jsii_name=\"bindAsNotificationRuleTarget\")\n def bind_as_notification_rule_target(\n self,\n _scope: constructs.Construct,\n ) -> _NotificationRuleTargetConfig_ea27e095:\n '''Returns a target configuration for notification rule.\n\n :param _scope: -\n '''\n if __debug__:\n type_hints = 
typing.get_type_hints(SlackChannelConfiguration.bind_as_notification_rule_target)\n check_type(argname=\"argument _scope\", value=_scope, expected_type=type_hints[\"_scope\"])\n return typing.cast(_NotificationRuleTargetConfig_ea27e095, jsii.invoke(self, \"bindAsNotificationRuleTarget\", [_scope]))\n\n @jsii.member(jsii_name=\"metric\")\n def metric(\n self,\n metric_name: builtins.str,\n *,\n account: typing.Optional[builtins.str] = None,\n color: typing.Optional[builtins.str] = None,\n dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,\n label: typing.Optional[builtins.str] = None,\n period: typing.Optional[_Duration_4839e8c3] = None,\n region: typing.Optional[builtins.str] = None,\n statistic: typing.Optional[builtins.str] = None,\n unit: typing.Optional[_Unit_61bc6f70] = None,\n ) -> _Metric_e396a4dc:\n '''Return the given named metric for this SlackChannelConfiguration.\n\n :param metric_name: -\n :param account: Account which this metric comes from. Default: - Deployment account.\n :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color\n :param dimensions_map: Dimensions of the metric. Default: - No dimensions.\n :param label: Label for this metric when added to a Graph in a Dashboard. You can use `dynamic labels <https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/graph-dynamic-labels.html>`_ to show summary information about the entire displayed time series in the legend. For example, if you use:: [max: ${MAX}] MyMetric As the metric label, the maximum value in the visible range will be shown next to the time series name in the graph's legend. Default: - No label\n :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)\n :param region: Region which this metric comes from. 
Default: - Deployment region.\n :param statistic: What function to use for aggregating. Can be one of the following: - \"Minimum\" | \"min\" - \"Maximum\" | \"max\" - \"Average\" | \"avg\" - \"Sum\" | \"sum\" - \"SampleCount | \"n\" - \"pNN.NN\" Default: Average\n :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream\n '''\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfiguration.metric)\n check_type(argname=\"argument metric_name\", value=metric_name, expected_type=type_hints[\"metric_name\"])\n props = _MetricOptions_1788b62f(\n account=account,\n color=color,\n dimensions_map=dimensions_map,\n label=label,\n period=period,\n region=region,\n statistic=statistic,\n unit=unit,\n )\n\n return typing.cast(_Metric_e396a4dc, jsii.invoke(self, \"metric\", [metric_name, props]))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"grantPrincipal\")\n def grant_principal(self) -> _IPrincipal_539bb2fd:\n '''The principal to grant permissions to.'''\n return typing.cast(_IPrincipal_539bb2fd, jsii.get(self, \"grantPrincipal\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationArn\")\n def slack_channel_configuration_arn(self) -> builtins.str:\n '''The ARN of the Slack channel configuration In the form of arn:aws:chatbot:{region}:{account}:chat-configuration/slack-channel/{slackChannelName}.'''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationArn\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"slackChannelConfigurationName\")\n def 
slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.'''\n return typing.cast(builtins.str, jsii.get(self, \"slackChannelConfigurationName\"))\n\n @builtins.property # type: ignore[misc]\n @jsii.member(jsii_name=\"role\")\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.'''\n return typing.cast(typing.Optional[_IRole_235f5d8e], jsii.get(self, \"role\"))\n\n\[email protected]_type(\n jsii_type=\"aws-cdk-lib.aws_chatbot.SlackChannelConfigurationProps\",\n jsii_struct_bases=[],\n name_mapping={\n \"slack_channel_configuration_name\": \"slackChannelConfigurationName\",\n \"slack_channel_id\": \"slackChannelId\",\n \"slack_workspace_id\": \"slackWorkspaceId\",\n \"logging_level\": \"loggingLevel\",\n \"log_retention\": \"logRetention\",\n \"log_retention_retry_options\": \"logRetentionRetryOptions\",\n \"log_retention_role\": \"logRetentionRole\",\n \"notification_topics\": \"notificationTopics\",\n \"role\": \"role\",\n },\n)\nclass SlackChannelConfigurationProps:\n def __init__(\n self,\n *,\n slack_channel_configuration_name: builtins.str,\n slack_channel_id: builtins.str,\n slack_workspace_id: builtins.str,\n logging_level: typing.Optional[LoggingLevel] = None,\n log_retention: typing.Optional[_RetentionDays_070f99f0] = None,\n log_retention_retry_options: typing.Optional[_LogRetentionRetryOptions_62d80a14] = None,\n log_retention_role: typing.Optional[_IRole_235f5d8e] = None,\n notification_topics: typing.Optional[typing.Sequence[_ITopic_9eca4852]] = None,\n role: typing.Optional[_IRole_235f5d8e] = None,\n ) -> None:\n '''Properties for a new Slack channel configuration.\n\n :param slack_channel_configuration_name: The name of Slack channel configuration.\n :param slack_channel_id: The ID of the Slack channel. To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. 
The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ.\n :param slack_workspace_id: The ID of the Slack workspace authorized with AWS Chatbot. To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console. Then you can copy and paste the workspace ID from the console. For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n :param logging_level: Specifies the logging level for this configuration. This property affects the log entries pushed to Amazon CloudWatch Logs. Default: LoggingLevel.NONE\n :param log_retention: The number of days log events are kept in CloudWatch Logs. When updating this property, unsetting it doesn't remove the log retention policy. To remove the retention policy, set the value to ``INFINITE``. Default: logs.RetentionDays.INFINITE\n :param log_retention_retry_options: When log retention is specified, a custom resource attempts to create the CloudWatch log group. These options control the retry policy when interacting with CloudWatch APIs. Default: - Default AWS SDK retry options.\n :param log_retention_role: The IAM role for the Lambda function associated with the custom resource that sets the retention policy. Default: - A new role is created.\n :param notification_topics: The SNS topics that deliver notifications to AWS Chatbot. Default: None\n :param role: The permission role of Slack channel configuration. 
Default: - A role will be created.\n\n :exampleMetadata: infused\n\n Example::\n\n import aws_cdk.aws_chatbot as chatbot\n \n # project: codebuild.Project\n \n \n target = chatbot.SlackChannelConfiguration(self, \"MySlackChannel\",\n slack_channel_configuration_name=\"YOUR_CHANNEL_NAME\",\n slack_workspace_id=\"YOUR_SLACK_WORKSPACE_ID\",\n slack_channel_id=\"YOUR_SLACK_CHANNEL_ID\"\n )\n \n rule = project.notify_on_build_succeeded(\"NotifyOnBuildSucceeded\", target)\n '''\n if isinstance(log_retention_retry_options, dict):\n log_retention_retry_options = _LogRetentionRetryOptions_62d80a14(**log_retention_retry_options)\n if __debug__:\n type_hints = typing.get_type_hints(SlackChannelConfigurationProps.__init__)\n check_type(argname=\"argument slack_channel_configuration_name\", value=slack_channel_configuration_name, expected_type=type_hints[\"slack_channel_configuration_name\"])\n check_type(argname=\"argument slack_channel_id\", value=slack_channel_id, expected_type=type_hints[\"slack_channel_id\"])\n check_type(argname=\"argument slack_workspace_id\", value=slack_workspace_id, expected_type=type_hints[\"slack_workspace_id\"])\n check_type(argname=\"argument logging_level\", value=logging_level, expected_type=type_hints[\"logging_level\"])\n check_type(argname=\"argument log_retention\", value=log_retention, expected_type=type_hints[\"log_retention\"])\n check_type(argname=\"argument log_retention_retry_options\", value=log_retention_retry_options, expected_type=type_hints[\"log_retention_retry_options\"])\n check_type(argname=\"argument log_retention_role\", value=log_retention_role, expected_type=type_hints[\"log_retention_role\"])\n check_type(argname=\"argument notification_topics\", value=notification_topics, expected_type=type_hints[\"notification_topics\"])\n check_type(argname=\"argument role\", value=role, expected_type=type_hints[\"role\"])\n self._values: typing.Dict[str, typing.Any] = {\n \"slack_channel_configuration_name\": 
slack_channel_configuration_name,\n \"slack_channel_id\": slack_channel_id,\n \"slack_workspace_id\": slack_workspace_id,\n }\n if logging_level is not None:\n self._values[\"logging_level\"] = logging_level\n if log_retention is not None:\n self._values[\"log_retention\"] = log_retention\n if log_retention_retry_options is not None:\n self._values[\"log_retention_retry_options\"] = log_retention_retry_options\n if log_retention_role is not None:\n self._values[\"log_retention_role\"] = log_retention_role\n if notification_topics is not None:\n self._values[\"notification_topics\"] = notification_topics\n if role is not None:\n self._values[\"role\"] = role\n\n @builtins.property\n def slack_channel_configuration_name(self) -> builtins.str:\n '''The name of Slack channel configuration.'''\n result = self._values.get(\"slack_channel_configuration_name\")\n assert result is not None, \"Required property 'slack_channel_configuration_name' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_channel_id(self) -> builtins.str:\n '''The ID of the Slack channel.\n\n To get the ID, open Slack, right click on the channel name in the left pane, then choose Copy Link.\n The channel ID is the 9-character string at the end of the URL. 
For example, ABCBBLZZZ.\n '''\n result = self._values.get(\"slack_channel_id\")\n assert result is not None, \"Required property 'slack_channel_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def slack_workspace_id(self) -> builtins.str:\n '''The ID of the Slack workspace authorized with AWS Chatbot.\n\n To get the workspace ID, you must perform the initial authorization flow with Slack in the AWS Chatbot console.\n Then you can copy and paste the workspace ID from the console.\n For more details, see steps 1-4 in Setting Up AWS Chatbot with Slack in the AWS Chatbot User Guide.\n\n :see: https://docs.aws.amazon.com/chatbot/latest/adminguide/setting-up.html#Setup_intro\n '''\n result = self._values.get(\"slack_workspace_id\")\n assert result is not None, \"Required property 'slack_workspace_id' is missing\"\n return typing.cast(builtins.str, result)\n\n @builtins.property\n def logging_level(self) -> typing.Optional[LoggingLevel]:\n '''Specifies the logging level for this configuration.\n\n This property affects the log entries pushed to Amazon CloudWatch Logs.\n\n :default: LoggingLevel.NONE\n '''\n result = self._values.get(\"logging_level\")\n return typing.cast(typing.Optional[LoggingLevel], result)\n\n @builtins.property\n def log_retention(self) -> typing.Optional[_RetentionDays_070f99f0]:\n '''The number of days log events are kept in CloudWatch Logs.\n\n When updating\n this property, unsetting it doesn't remove the log retention policy. 
To\n remove the retention policy, set the value to ``INFINITE``.\n\n :default: logs.RetentionDays.INFINITE\n '''\n result = self._values.get(\"log_retention\")\n return typing.cast(typing.Optional[_RetentionDays_070f99f0], result)\n\n @builtins.property\n def log_retention_retry_options(\n self,\n ) -> typing.Optional[_LogRetentionRetryOptions_62d80a14]:\n '''When log retention is specified, a custom resource attempts to create the CloudWatch log group.\n\n These options control the retry policy when interacting with CloudWatch APIs.\n\n :default: - Default AWS SDK retry options.\n '''\n result = self._values.get(\"log_retention_retry_options\")\n return typing.cast(typing.Optional[_LogRetentionRetryOptions_62d80a14], result)\n\n @builtins.property\n def log_retention_role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The IAM role for the Lambda function associated with the custom resource that sets the retention policy.\n\n :default: - A new role is created.\n '''\n result = self._values.get(\"log_retention_role\")\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n @builtins.property\n def notification_topics(self) -> typing.Optional[typing.List[_ITopic_9eca4852]]:\n '''The SNS topics that deliver notifications to AWS Chatbot.\n\n :default: None\n '''\n result = self._values.get(\"notification_topics\")\n return typing.cast(typing.Optional[typing.List[_ITopic_9eca4852]], result)\n\n @builtins.property\n def role(self) -> typing.Optional[_IRole_235f5d8e]:\n '''The permission role of Slack channel configuration.\n\n :default: - A role will be created.\n '''\n result = self._values.get(\"role\")\n return typing.cast(typing.Optional[_IRole_235f5d8e], result)\n\n def __eq__(self, rhs: typing.Any) -> builtins.bool:\n return isinstance(rhs, self.__class__) and rhs._values == self._values\n\n def __ne__(self, rhs: typing.Any) -> builtins.bool:\n return not (rhs == self)\n\n def __repr__(self) -> str:\n return \"SlackChannelConfigurationProps(%s)\" % \", 
\".join(\n k + \"=\" + repr(v) for k, v in self._values.items()\n )\n\n\n__all__ = [\n \"CfnSlackChannelConfiguration\",\n \"CfnSlackChannelConfigurationProps\",\n \"ISlackChannelConfiguration\",\n \"LoggingLevel\",\n \"SlackChannelConfiguration\",\n \"SlackChannelConfigurationProps\",\n]\n\npublication.publish()\n", "step-ids": [ 39, 61, 66, 75, 85 ] }
[ 39, 61, 66, 75, 85 ]
import pygame import os import random #Vx = float(input("Input Vx : ")) #Vy = float(input("Input Vy : ")) Vx = 20 Vy = 20 #GEOMETRY screen_width = 1000 screen_height = 600 FPS = 30 #COLOR BLUE = (0, 0, 255) BLACK = (0, 0, 0) GREEN = (204, 153, 255) RED = (255, 0, 0) WHITE = (155, 25, 0) colorList = [BLUE, BLACK, GREEN, RED, WHITE] #Initialize pygame pygame.init() path = os.path.dirname(__file__) img_path = os.path.join(path, 'Gallery') background = pygame.image.load('Gallery/parallax.png') background = pygame.transform.scale(background, [screen_width, screen_height]) win = pygame.display.set_mode([screen_width, screen_height]) pygame.display.set_caption("Stateczek shoot Projectile") clock = pygame.time.Clock() pixelRatio = 10 accel = -9.81 timeStep = 1 / FPS font = pygame.font.SysFont('comic', 50, False, False) #####CREATE SPRITE##### class player(pygame.sprite.Sprite): image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png')) # เรียกรูปตัวละครมาเก็บในตัวแปร def __init__(self, x, y): # ฟังก์ชั่นนี้เอาไว้กำหนดตัวแปร pygame.sprite.Sprite.__init__(self) self.x = x self.y = y self.move = 10 def draw(self, win): win.blit(self.image, (self.x, self.y)) #####CREATE PROJECTILE SHOOT##### class projectile(pygame.sprite.Sprite): def __init__(self, x, y, ux, uy): pygame.sprite.Sprite.__init__(self) self.x = x + 30 self.y = y self.startX = self.x self.startY = self.y self.horVel = ux self.verVel = uy self.color = random.choice(colorList) self.bulletTime = 0.0 self.status = 1 def update(self): global maxHeight global maxHeightPos global landingPos global ranges global trace if self.y <= screen_height: self.bulletTime += timeStep self.x = (self.horVel * self.bulletTime) * pixelRatio + self.startX self.y = -(self.verVel * self.bulletTime + 0.5 * accel * ( self.bulletTime ** 2)) * pixelRatio + self.startY trace.append([self.x, self.y]) if self.x >= screen_width: self.status = 0 if self.y < 0: self.status = 0 else: # กระสุนลงพื้น self.status = 0 pygame.display.update() 
def draw(self, win): pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6) for t in traceShow: pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1) #####CREATE ENEMYS##### class enemy(pygame.sprite.Sprite): im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png')) im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png')) im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png')) imageList = [im, im2, im3] def __init__(self, x, y): pygame.sprite.Sprite.__init__(self) self.x = x self.y = y self.hitbox = (self.x, self.y, 60, 60) self.vel = 6 self.imageRandom = random.choice(self.imageList) def draw(self, win): self.move_enemy() win.blit(self.imageRandom, (self.x, self.y)) def move_enemy(self): if self.vel > 0: if self.y + self.vel < 560: self.y += self.vel self.hitbox = (self.x, self.y, 60, 60) else: self.vel = self.vel * -1 else: if self.y - self.vel > 10: self.y += self.vel self.hitbox = (self.x, self.y, 60, 60) else: self.vel = self.vel * -1 #####FUNCTION SHOW DISPLAY#### def display(s): win.blit(background, (0, 0)) player1.draw(win) Monster1.draw(win) Monster2.draw(win) Monster3.draw(win) score = font.render('Score : ' + str(s), 1, (0, 0, 0)) win.blit(score, (430, 30)) for bullet in bullets: bullet.draw(win) pygame.display.update() # mainloop Y = 300 X = 30 X1 = random.randint(500, 590) X2 = random.randint(660, 760) X3 = random.randint(830, 900) Y1 = random.randint(60, 720) Y2 = random.randint(40, 720) Y3 = random.randint(60, 720) player1 = player(X, Y) Monster1 = enemy(X1, Y1) Monster2 = enemy(X2, Y2) Monster3 = enemy(X3, Y3) bullets = [] trace = [] traceShow = [] color = [] resetTrace = False shootStage = 0 showText = 0 maxHeight = 0 ranges = 0 r = 1 s = 0 ### START ### runing = True while runing: clock.tick(FPS) for event in pygame.event.get(): if event.type == pygame.QUIT: runing = False keys = pygame.key.get_pressed() if keys[pygame.K_UP]: if player1.y > 0: player1.y -= player1.move else: 
player1.y = 0 if keys[pygame.K_DOWN]: if player1.y < screen_height-30: player1.y += player1.move print(player1.y) else: player1.y = screen_height-30 print(player1.y) if keys[pygame.K_RIGHT]: if player1.x < screen_width-540: player1.x += player1.move print(player1.x) else: player1.x = screen_width-540 print(player1.x) if keys[pygame.K_LEFT]: if player1.x > 0: player1.x -= player1.move else: player1.x = 0 if keys[pygame.K_SPACE]: if shootStage == 0: bullets.append(projectile(player1.x, player1.y, Vx, Vy)) shootStage = 1 trace.clear() for bullet in bullets: bullet.update() traceShow = trace if bullet.y - 5 < Monster1.hitbox[1] + Monster1.hitbox[3] and bullet.y + 5 > Monster1.hitbox[1]: if bullet.x + 5 > Monster1.hitbox[0] and bullet.x - 5 < Monster1.hitbox[0] + Monster1.hitbox[2]: bullet.status = 0 X1 = random.randint(500, 590) Y1 = random.randint(60, 720) Monster1 = enemy(X1, Y1) s += 1 if bullet.y - 5 < Monster2.hitbox[1] + Monster2.hitbox[3] and bullet.y + 5 > Monster2.hitbox[1]: if bullet.x + 5 > Monster2.hitbox[0] and bullet.x - 5 < Monster2.hitbox[0] + Monster2.hitbox[2]: bullet.status = 0 X2 = random.randint(660, 760) Y2 = random.randint(60, 720) Monster2 = enemy(X2, Y2) s += 1 if bullet.y - 5 < Monster3.hitbox[1] + Monster3.hitbox[3] and bullet.y + 5 > Monster3.hitbox[ 1]: if bullet.x + 5 > Monster3.hitbox[0] and bullet.x - 5 < Monster3.hitbox[0] + Monster3.hitbox[ 2]: bullet.status = 0 X3 = random.randint(830, 900) Y3 = random.randint(60, 720) Monster3 = enemy(X3, Y3) s += 1 if bullet.status == 0: shootStage = 0 bullets.pop(bullets.index(bullet)) display(s) pygame.display.update() pygame.quit()
normal
{ "blob_id": "0dd5511c0e39f113c46785be78a898e79bc45a21", "index": 5188, "step-1": "<mask token>\n\n\nclass projectile(pygame.sprite.Sprite):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass projectile(pygame.sprite.Sprite):\n\n def __init__(self, x, y, ux, uy):\n pygame.sprite.Sprite.__init__(self)\n self.x = x + 30\n self.y = y\n self.startX = self.x\n self.startY = self.y\n self.horVel = ux\n self.verVel = uy\n self.color = random.choice(colorList)\n self.bulletTime = 0.0\n self.status = 1\n\n def update(self):\n global maxHeight\n global maxHeightPos\n global landingPos\n global ranges\n global trace\n if self.y <= screen_height:\n self.bulletTime += timeStep\n self.x = self.horVel * self.bulletTime * pixelRatio + self.startX\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.\n bulletTime ** 2) * pixelRatio + self.startY\n trace.append([self.x, self.y])\n if self.x >= screen_width:\n self.status = 0\n if self.y < 0:\n self.status = 0\n else:\n self.status = 0\n pygame.display.update()\n\n def draw(self, win):\n pygame.draw.circle(win, 
self.color, (round(self.x), round(self.y)), 6)\n for t in traceShow:\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass player(pygame.sprite.Sprite):\n image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png'))\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.move = 10\n\n def draw(self, win):\n win.blit(self.image, (self.x, self.y))\n\n\nclass projectile(pygame.sprite.Sprite):\n\n def __init__(self, x, y, ux, uy):\n pygame.sprite.Sprite.__init__(self)\n self.x = x + 30\n self.y = y\n self.startX = self.x\n self.startY = self.y\n self.horVel = ux\n self.verVel = uy\n self.color = random.choice(colorList)\n self.bulletTime = 0.0\n self.status = 1\n\n def update(self):\n global maxHeight\n global maxHeightPos\n global landingPos\n global ranges\n global trace\n if self.y <= screen_height:\n self.bulletTime += timeStep\n self.x = self.horVel * self.bulletTime * pixelRatio + self.startX\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.\n bulletTime ** 2) * 
pixelRatio + self.startY\n trace.append([self.x, self.y])\n if self.x >= screen_width:\n self.status = 0\n if self.y < 0:\n self.status = 0\n else:\n self.status = 0\n pygame.display.update()\n\n def draw(self, win):\n pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)\n for t in traceShow:\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass player(pygame.sprite.Sprite):\n image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png'))\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.move = 10\n\n def draw(self, win):\n win.blit(self.image, (self.x, self.y))\n\n\nclass projectile(pygame.sprite.Sprite):\n\n def __init__(self, x, y, ux, uy):\n pygame.sprite.Sprite.__init__(self)\n self.x = x + 30\n self.y = y\n self.startX = self.x\n self.startY = self.y\n self.horVel = ux\n self.verVel = uy\n self.color = random.choice(colorList)\n self.bulletTime = 0.0\n self.status = 1\n\n def update(self):\n global maxHeight\n global maxHeightPos\n global landingPos\n 
global ranges\n global trace\n if self.y <= screen_height:\n self.bulletTime += timeStep\n self.x = self.horVel * self.bulletTime * pixelRatio + self.startX\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * self.\n bulletTime ** 2) * pixelRatio + self.startY\n trace.append([self.x, self.y])\n if self.x >= screen_width:\n self.status = 0\n if self.y < 0:\n self.status = 0\n else:\n self.status = 0\n pygame.display.update()\n\n def draw(self, win):\n pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)\n for t in traceShow:\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\n\n\nclass enemy(pygame.sprite.Sprite):\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\n imageList = [im, im2, im3]\n\n def __init__(self, x, y):\n pygame.sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.hitbox = self.x, self.y, 60, 60\n self.vel = 6\n self.imageRandom = random.choice(self.imageList)\n\n def draw(self, win):\n self.move_enemy()\n win.blit(self.imageRandom, (self.x, self.y))\n\n def move_enemy(self):\n if self.vel > 0:\n if self.y + self.vel < 560:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n elif self.y - self.vel > 10:\n self.y += self.vel\n self.hitbox = self.x, self.y, 60, 60\n else:\n self.vel = self.vel * -1\n\n\ndef display(s):\n win.blit(background, (0, 0))\n player1.draw(win)\n Monster1.draw(win)\n Monster2.draw(win)\n Monster3.draw(win)\n score = font.render('Score : ' + str(s), 1, (0, 0, 0))\n win.blit(score, (430, 30))\n for bullet in bullets:\n bullet.draw(win)\n pygame.display.update()\n\n\n<mask token>\n", "step-5": "import pygame\r\nimport os\r\nimport random\r\n\r\n\r\n#Vx = float(input(\"Input Vx : \"))\r\n#Vy = float(input(\"Input Vy : \"))\r\nVx = 20\r\nVy = 
20\r\n\r\n#GEOMETRY\r\nscreen_width = 1000\r\nscreen_height = 600\r\nFPS = 30\r\n\r\n#COLOR\r\nBLUE = (0, 0, 255)\r\nBLACK = (0, 0, 0)\r\nGREEN = (204, 153, 255)\r\nRED = (255, 0, 0)\r\nWHITE = (155, 25, 0)\r\ncolorList = [BLUE, BLACK, GREEN, RED, WHITE]\r\n\r\n#Initialize pygame\r\npygame.init()\r\npath = os.path.dirname(__file__)\r\nimg_path = os.path.join(path, 'Gallery')\r\nbackground = pygame.image.load('Gallery/parallax.png')\r\nbackground = pygame.transform.scale(background, [screen_width, screen_height])\r\nwin = pygame.display.set_mode([screen_width, screen_height])\r\npygame.display.set_caption(\"Stateczek shoot Projectile\")\r\nclock = pygame.time.Clock()\r\n\r\npixelRatio = 10\r\naccel = -9.81\r\ntimeStep = 1 / FPS\r\nfont = pygame.font.SysFont('comic', 50, False, False)\r\n\r\n#####CREATE SPRITE#####\r\nclass player(pygame.sprite.Sprite):\r\n image = pygame.image.load(os.path.join(path, 'Gallery', 'life.png')) # เรียกรูปตัวละครมาเก็บในตัวแปร\r\n def __init__(self, x, y): # ฟังก์ชั่นนี้เอาไว้กำหนดตัวแปร\r\n pygame.sprite.Sprite.__init__(self)\r\n self.x = x\r\n self.y = y\r\n self.move = 10\r\n\r\n def draw(self, win):\r\n win.blit(self.image, (self.x, self.y))\r\n\r\n#####CREATE PROJECTILE SHOOT#####\r\nclass projectile(pygame.sprite.Sprite):\r\n def __init__(self, x, y, ux, uy):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.x = x + 30\r\n self.y = y\r\n self.startX = self.x\r\n self.startY = self.y\r\n self.horVel = ux\r\n self.verVel = uy\r\n self.color = random.choice(colorList)\r\n self.bulletTime = 0.0\r\n self.status = 1\r\n\r\n def update(self):\r\n global maxHeight\r\n global maxHeightPos\r\n global landingPos\r\n global ranges\r\n global trace\r\n if self.y <= screen_height:\r\n self.bulletTime += timeStep\r\n self.x = (self.horVel * self.bulletTime) * pixelRatio + self.startX\r\n self.y = -(self.verVel * self.bulletTime + 0.5 * accel * (\r\n self.bulletTime ** 2)) * pixelRatio + self.startY\r\n\r\n trace.append([self.x, self.y])\r\n if 
self.x >= screen_width:\r\n self.status = 0\r\n if self.y < 0:\r\n self.status = 0\r\n else: # กระสุนลงพื้น\r\n self.status = 0\r\n\r\n pygame.display.update()\r\n\r\n def draw(self, win):\r\n pygame.draw.circle(win, self.color, (round(self.x), round(self.y)), 6)\r\n for t in traceShow:\r\n pygame.draw.circle(win, self.color, (round(t[0]), round(t[1])), 1)\r\n\r\n\r\n#####CREATE ENEMYS#####\r\nclass enemy(pygame.sprite.Sprite):\r\n im = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\r\n im2 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\r\n im3 = pygame.image.load(os.path.join(path, 'Gallery', 'stateczek.png'))\r\n imageList = [im, im2, im3]\r\n\r\n def __init__(self, x, y):\r\n pygame.sprite.Sprite.__init__(self)\r\n self.x = x\r\n self.y = y\r\n self.hitbox = (self.x, self.y, 60, 60)\r\n self.vel = 6\r\n self.imageRandom = random.choice(self.imageList)\r\n\r\n def draw(self, win):\r\n self.move_enemy()\r\n win.blit(self.imageRandom, (self.x, self.y))\r\n\r\n def move_enemy(self):\r\n if self.vel > 0:\r\n if self.y + self.vel < 560:\r\n self.y += self.vel\r\n self.hitbox = (self.x, self.y, 60, 60)\r\n else:\r\n self.vel = self.vel * -1\r\n else:\r\n if self.y - self.vel > 10:\r\n self.y += self.vel\r\n self.hitbox = (self.x, self.y, 60, 60)\r\n else:\r\n self.vel = self.vel * -1\r\n\r\n\r\n#####FUNCTION SHOW DISPLAY####\r\ndef display(s):\r\n win.blit(background, (0, 0))\r\n player1.draw(win)\r\n Monster1.draw(win)\r\n Monster2.draw(win)\r\n Monster3.draw(win)\r\n score = font.render('Score : ' + str(s), 1, (0, 0, 0))\r\n win.blit(score, (430, 30))\r\n for bullet in bullets:\r\n bullet.draw(win)\r\n pygame.display.update()\r\n\r\n\r\n# mainloop\r\nY = 300\r\nX = 30\r\nX1 = random.randint(500, 590)\r\nX2 = random.randint(660, 760)\r\nX3 = random.randint(830, 900)\r\nY1 = random.randint(60, 720)\r\nY2 = random.randint(40, 720)\r\nY3 = random.randint(60, 720)\r\n\r\nplayer1 = player(X, Y)\r\nMonster1 = enemy(X1, Y1)\r\nMonster2 
= enemy(X2, Y2)\r\nMonster3 = enemy(X3, Y3)\r\n\r\nbullets = []\r\ntrace = []\r\ntraceShow = []\r\ncolor = []\r\nresetTrace = False\r\nshootStage = 0\r\nshowText = 0\r\nmaxHeight = 0\r\nranges = 0\r\nr = 1\r\ns = 0\r\n\r\n### START ###\r\nruning = True\r\nwhile runing:\r\n clock.tick(FPS)\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n runing = False\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_UP]:\r\n if player1.y > 0:\r\n player1.y -= player1.move\r\n else:\r\n player1.y = 0\r\n\r\n if keys[pygame.K_DOWN]:\r\n if player1.y < screen_height-30:\r\n player1.y += player1.move\r\n print(player1.y)\r\n else:\r\n player1.y = screen_height-30\r\n print(player1.y)\r\n if keys[pygame.K_RIGHT]:\r\n if player1.x < screen_width-540:\r\n player1.x += player1.move\r\n print(player1.x)\r\n else:\r\n player1.x = screen_width-540\r\n print(player1.x)\r\n if keys[pygame.K_LEFT]:\r\n if player1.x > 0:\r\n player1.x -= player1.move\r\n else:\r\n player1.x = 0\r\n\r\n if keys[pygame.K_SPACE]:\r\n if shootStage == 0:\r\n bullets.append(projectile(player1.x, player1.y, Vx, Vy))\r\n shootStage = 1\r\n trace.clear()\r\n\r\n for bullet in bullets:\r\n bullet.update()\r\n traceShow = trace\r\n if bullet.y - 5 < Monster1.hitbox[1] + Monster1.hitbox[3] and bullet.y + 5 > Monster1.hitbox[1]:\r\n if bullet.x + 5 > Monster1.hitbox[0] and bullet.x - 5 < Monster1.hitbox[0] + Monster1.hitbox[2]:\r\n bullet.status = 0\r\n X1 = random.randint(500, 590)\r\n Y1 = random.randint(60, 720)\r\n\r\n Monster1 = enemy(X1, Y1)\r\n s += 1\r\n if bullet.y - 5 < Monster2.hitbox[1] + Monster2.hitbox[3] and bullet.y + 5 > Monster2.hitbox[1]:\r\n if bullet.x + 5 > Monster2.hitbox[0] and bullet.x - 5 < Monster2.hitbox[0] + Monster2.hitbox[2]:\r\n bullet.status = 0\r\n X2 = random.randint(660, 760)\r\n Y2 = random.randint(60, 720)\r\n Monster2 = enemy(X2, Y2)\r\n s += 1\r\n if bullet.y - 5 < Monster3.hitbox[1] + Monster3.hitbox[3] and bullet.y + 5 > Monster3.hitbox[\r\n 
1]:\r\n if bullet.x + 5 > Monster3.hitbox[0] and bullet.x - 5 < Monster3.hitbox[0] + Monster3.hitbox[\r\n 2]:\r\n bullet.status = 0\r\n X3 = random.randint(830, 900)\r\n Y3 = random.randint(60, 720)\r\n Monster3 = enemy(X3, Y3)\r\n s += 1\r\n if bullet.status == 0:\r\n shootStage = 0\r\n bullets.pop(bullets.index(bullet))\r\n\r\n display(s)\r\n pygame.display.update()\r\n\r\npygame.quit()\r\n\r\n", "step-ids": [ 6, 9, 13, 14, 18 ] }
[ 6, 9, 13, 14, 18 ]
''' Faraday Penetration Test IDE Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/) See the file 'doc/LICENSE' for the license information ''' """ This module contains some useful functions to embedd an IPython shell. This allows to interactively test things. TODO: create a QT Widget capable of running the IPython shell whitout blocking the entire app. Kind of the http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK """ import traceback import model.api IPYTHON_BANNER = "\n".join(["-"*45, "Starting embedded IPython Shell...", "Press CTRL + D to exit.", "-"*45]) IPYTHON_EXIT_MSG = "\n".join(["-"*45, "Exiting IPython Shell...", "Returning normal execution.", "-"*45]) __ipython_active = False def embedd_ipython011(local_ns={}, global_ns={}): from IPython.config.loader import Config from IPython.frontend.terminal.embed import InteractiveShellEmbed cfg = Config() ipshell = InteractiveShellEmbed(config=cfg, banner1 = IPYTHON_BANNER, exit_msg = IPYTHON_EXIT_MSG) ipshell(local_ns=local_ns, global_ns=global_ns) def embedd_ipython010(local_ns={}, global_ns={}): from IPython.Shell import IPShellEmbed ipshell = IPShellEmbed( [""], banner = IPYTHON_BANNER, exit_msg = IPYTHON_EXIT_MSG ) ipshell(local_ns=local_ns, global_ns=global_ns) def embedd(local_ns={}, global_ns={}): global __ipython_active if __ipython_active: return __ipython_active = True try: import IPython version = IPython.__version__.split(".")[1] if int(version) > 10: embedd_ipython011(local_ns, global_ns) else: embedd_ipython010(local_ns, global_ns) except Exception, e: msg = "An error ocurred while trying to embedd the IPython Shell\n%s" model.api.log(msg % str(e), "ERROR") model.api.devlog(msg % traceback.format_exc()) finally: __ipython_active = False def embeddQT(local_ns={}, global_ns={}): global __ipython_active if __ipython_active: return __ipython_active = True try: from IPython.Shell import IPShellQt ipshell = IPShellQt( [""], user_ns=local_ns, user_global_ns=global_ns ) ipshell.run() except 
Exception: model.api.devlog("An error ocurred while trying to embedd the IPython Shell\n%s" % traceback.format_exc()) finally: __ipython_active = False
normal
{ "blob_id": "3eb071fa826c838d847e3f97abe3b706760a1336", "index": 1309, "step-1": "'''\nFaraday Penetration Test IDE\nCopyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)\nSee the file 'doc/LICENSE' for the license information\n\n'''\n\"\"\"\nThis module contains some useful functions to embedd an IPython shell.\nThis allows to interactively test things.\nTODO: create a QT Widget capable of running the IPython shell whitout\nblocking the entire app. Kind of the http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK\n\"\"\"\n\nimport traceback\nimport model.api\n\nIPYTHON_BANNER = \"\\n\".join([\"-\"*45,\n \"Starting embedded IPython Shell...\",\n \"Press CTRL + D to exit.\",\n \"-\"*45])\n\nIPYTHON_EXIT_MSG = \"\\n\".join([\"-\"*45,\n \"Exiting IPython Shell...\",\n \"Returning normal execution.\",\n \"-\"*45])\n\n__ipython_active = False\n\n \n \n\ndef embedd_ipython011(local_ns={}, global_ns={}):\n from IPython.config.loader import Config\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n cfg = Config() \n ipshell = InteractiveShellEmbed(config=cfg,\n banner1 = IPYTHON_BANNER,\n exit_msg = IPYTHON_EXIT_MSG)\n \n ipshell(local_ns=local_ns, global_ns=global_ns)\n\n\ndef embedd_ipython010(local_ns={}, global_ns={}):\n from IPython.Shell import IPShellEmbed\n ipshell = IPShellEmbed( [\"\"],\n banner = IPYTHON_BANNER,\n exit_msg = IPYTHON_EXIT_MSG\n )\n ipshell(local_ns=local_ns, global_ns=global_ns)\n \n\ndef embedd(local_ns={}, global_ns={}):\n global __ipython_active\n if __ipython_active:\n return\n\n __ipython_active = True\n try:\n import IPython\n version = IPython.__version__.split(\".\")[1]\n if int(version) > 10:\n embedd_ipython011(local_ns, global_ns)\n else:\n embedd_ipython010(local_ns, global_ns)\n \n except Exception, e:\n msg = \"An error ocurred while trying to embedd the IPython Shell\\n%s\"\n model.api.log(msg % str(e), \"ERROR\")\n model.api.devlog(msg % traceback.format_exc())\n finally:\n __ipython_active = 
False\n\n\ndef embeddQT(local_ns={}, global_ns={}):\n \n\n global __ipython_active\n if __ipython_active:\n return\n __ipython_active = True\n try:\n from IPython.Shell import IPShellQt\n ipshell = IPShellQt( [\"\"],\n user_ns=local_ns,\n user_global_ns=global_ns\n )\n ipshell.run()\n except Exception:\n model.api.devlog(\"An error ocurred while trying to embedd the IPython Shell\\n%s\" % traceback.format_exc())\n finally:\n __ipython_active = False\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
__author__ = 'asistente'

from unittest import TestCase

from selenium import webdriver
from selenium.webdriver.common.by import By

# Raw strings avoid the invalid "\c" / "\d" escape sequences the original
# literals relied on (a SyntaxWarning on modern Python, and a future
# SyntaxError); the byte values are unchanged.
CHROMEDRIVER_PATH = r'C:\chromedriver\chromedriver.exe'
IMAGE_PATH = r'C:\chromedriver\developer.jpg'


class FunctionalTest(TestCase):
    """End-to-end browser tests for the BuscoAyuda site.

    Requires a Chrome/chromedriver install at ``CHROMEDRIVER_PATH`` and the
    application running at http://localhost:8000.  All element lookups use
    the Selenium 4 ``find_element(By.<strategy>, ...)`` API; the legacy
    ``find_element_by_id`` helpers were removed in Selenium 4.
    """

    def setUp(self):
        """Start a Chrome session with a short implicit wait."""
        self.browser = webdriver.Chrome(CHROMEDRIVER_PATH)
        self.browser.implicitly_wait(2)

    def tearDown(self):
        """Close the browser after each test."""
        self.browser.quit()

    def test_title(self):
        """The landing page title contains the site name."""
        self.browser.get('http://localhost:8000')
        self.assertIn('BuscoAyuda', self.browser.title)

    def test_registro(self):
        """A new user can register and then appears in the listing."""
        self.browser.get('http://localhost:8000')
        link = self.browser.find_element(By.ID, 'id_register')
        link.click()

        nombre = self.browser.find_element(By.ID, 'id_nombre')
        nombre.send_keys('Rafael')

        apellidos = self.browser.find_element(By.ID, 'id_apellidos')
        apellidos.send_keys('Medrano')

        experiencia = self.browser.find_element(By.ID, 'id_aniosExperiencia')
        experiencia.send_keys('7')

        self.browser.find_element(
            By.XPATH,
            "//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']").click()
        telefono = self.browser.find_element(By.ID, 'id_telefono')
        telefono.send_keys('3135555555')

        correo = self.browser.find_element(By.ID, 'id_correo')
        correo.send_keys('[email protected]')

        imagen = self.browser.find_element(By.ID, 'id_imagen')
        imagen.send_keys(IMAGE_PATH)

        nombreUsuario = self.browser.find_element(By.ID, 'id_username')
        nombreUsuario.send_keys('re.medrano')

        clave = self.browser.find_element(By.ID, 'id_password')
        clave.send_keys('prueba123')

        botonGrabar = self.browser.find_element(By.ID, 'id_grabar')
        botonGrabar.click()
        self.browser.implicitly_wait(3)

        span = self.browser.find_element(By.XPATH, '//span[text()="Rafael Medrano"]')
        self.assertIn('Rafael Medrano', span.text)

    def test_verDetalle(self):
        """Clicking a provider's name opens their detail page."""
        self.browser.get('http://localhost:8000')
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        span.click()
        self.browser.implicitly_wait(3)
        h2 = self.browser.find_element(By.XPATH, '//h2[text()="Betzy Editado Montanez Editado"]')

        self.assertIn('Betzy Editado Montanez Editado', h2.text)

    def test_login(self):
        """A registered user can log in; the navbar then shows Logout."""
        self.browser.get('http://localhost:8000')
        link = self.browser.find_element(By.ID, 'id_login')
        link.click()

        nombreUsuario = self.browser.find_element(By.ID, 'username')
        nombreUsuario.send_keys('ba.montanez')

        clave = self.browser.find_element(By.ID, 'password')
        clave.send_keys('prueba123')

        botonIngresar = self.browser.find_element(By.ID, 'id_ingresar')
        botonIngresar.click()

        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()=" Logout"]')

        self.assertIn('Logout', span.text)

    def test_Editar(self):
        """A logged-in user can edit their profile and see the new name."""
        self.browser.get('http://localhost:8000')
        link = self.browser.find_element(By.ID, 'id_login')
        link.click()

        nombreUsuario = self.browser.find_element(By.ID, 'username')
        nombreUsuario.send_keys('ba.montanez')

        claveIngreso = self.browser.find_element(By.ID, 'password')
        claveIngreso.send_keys('prueba123')

        botonIngresar = self.browser.find_element(By.ID, 'id_ingresar')
        botonIngresar.click()

        self.browser.implicitly_wait(3)

        linkEditar = self.browser.find_element(By.ID, 'id_editar')
        linkEditar.click()

        nombre = self.browser.find_element(By.ID, 'id_nombre')
        nombre.clear()
        nombre.send_keys('Betzy Editado')

        apellidos = self.browser.find_element(By.ID, 'id_apellidos')
        apellidos.clear()
        apellidos.send_keys('Montanez Editado')

        experiencia = self.browser.find_element(By.ID, 'id_aniosExperiencia')
        experiencia.clear()
        experiencia.send_keys('10')

        self.browser.find_element(
            By.XPATH,
            "//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']").click()
        telefono = self.browser.find_element(By.ID, 'id_telefono')
        telefono.clear()
        telefono.send_keys('313555666')

        correo = self.browser.find_element(By.ID, 'id_correo')
        correo.clear()
        correo.send_keys('[email protected]')

        imagen = self.browser.find_element(By.ID, 'id_imagen')
        imagen.send_keys(IMAGE_PATH)

        nombreUsuario = self.browser.find_element(By.ID, 'id_username')
        nombreUsuario.clear()
        nombreUsuario.send_keys('ba.montanez2')

        clave = self.browser.find_element(By.ID, 'id_password')
        clave.clear()
        clave.send_keys('prueba1234')

        botonGrabar = self.browser.find_element(By.ID, 'id_editar')
        botonGrabar.click()

        self.browser.implicitly_wait(3)

        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)

    def test_Comentar(self):
        """A visitor can leave a comment on a provider's detail page."""
        self.browser.get('http://localhost:8000')
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        span.click()
        self.browser.implicitly_wait(3)
        h2 = self.browser.find_element(By.XPATH, '//h2[text()="Betzy Editado Montanez Editado"]')

        correo = self.browser.find_element(By.ID, 'correo')
        correo.send_keys('[email protected]')

        comentario = self.browser.find_element(By.ID, 'comentario')
        comentario.send_keys('Comentario Prueba')

        botonAceptar = self.browser.find_element(By.ID, 'id_comentar')
        botonAceptar.click()
        self.browser.implicitly_wait(6)

        span = self.browser.find_element(By.XPATH, '//p[text()="Comentario Prueba"]')
        self.assertIn('Comentario Prueba', span.text)

    def test_listado(self):
        """The landing page lists the known providers."""
        self.browser.get('http://localhost:8000')

        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)

        self.browser.implicitly_wait(3)
        span = self.browser.find_element(By.XPATH, '//span[text()="pepito perez"]')
        self.assertIn('pepito perez', span.text)

    def test_buscar(self):
        """Searching by full name returns the matching provider."""
        self.browser.get('http://localhost:8000')

        correo = self.browser.find_element(By.ID, 'buscar')
        correo.send_keys('Betzy Editado Montanez Editado')

        botonBuscar = self.browser.find_element(By.ID, 'id_buscar')
        botonBuscar.click()
        self.browser.implicitly_wait(6)

        span = self.browser.find_element(By.XPATH, '//span[text()="Betzy Editado Montanez Editado"]')
        self.assertIn('Betzy Editado Montanez Editado', span.text)
normal
{ "blob_id": "fc4cf800c663abf20bfba7fcc1032e09a992641b", "index": 5334, "step-1": "<mask token>\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n <mask token>\n <mask token>\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n <mask token>\n <mask token>\n <mask token>\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n correo.send_keys('Betzy Editado Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n", "step-2": "<mask token>\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n <mask token>\n <mask token>\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span 
= self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n <mask token>\n <mask token>\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('[email protected]')\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n correo.send_keys('Betzy Editado Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n", "step-3": "<mask 
token>\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_title(self):\n self.browser.get('http://localhost:8000')\n self.assertIn('BuscoAyuda', self.browser.title)\n <mask token>\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n <mask token>\n\n def test_Editar(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n claveIngreso = self.browser.find_element_by_id('password')\n claveIngreso.send_keys('prueba123')\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n self.browser.implicitly_wait(3)\n linkEditar = self.browser.find_element_by_id('id_editar')\n linkEditar.click()\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.clear()\n nombre.send_keys('Betzy Editado')\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.clear()\n apellidos.send_keys('Montanez Editado')\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.clear()\n experiencia.send_keys('10')\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\"\n ).click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.clear()\n telefono.send_keys('313555666')\n correo = self.browser.find_element_by_id('id_correo')\n correo.clear()\n correo.send_keys('[email 
protected]')\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\\\chromedriver\\\\developer.jpg')\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.clear()\n nombreUsuario.send_keys('ba.montanez2')\n clave = self.browser.find_element_by_id('id_password')\n clave.clear()\n clave.send_keys('prueba1234')\n botonGrabar = self.browser.find_element_by_id('id_editar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('[email protected]')\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n correo.send_keys('Betzy Editado 
Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n", "step-4": "__author__ = 'asistente'\nfrom unittest import TestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome('C:\\\\chromedriver\\\\chromedriver.exe')\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_title(self):\n self.browser.get('http://localhost:8000')\n self.assertIn('BuscoAyuda', self.browser.title)\n\n def test_registro(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_register')\n link.click()\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.send_keys('Rafael')\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.send_keys('Medrano')\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.send_keys('7')\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\"\n ).click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.send_keys('3135555555')\n correo = self.browser.find_element_by_id('id_correo')\n correo.send_keys('[email protected]')\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\\\chromedriver\\\\developer.jpg')\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.send_keys('re.medrano')\n clave = self.browser.find_element_by_id('id_password')\n clave.send_keys('prueba123')\n botonGrabar = self.browser.find_element_by_id('id_grabar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n 
'//span[text()=\"Rafael Medrano\"]')\n self.assertIn('Rafael Medrano', span.text)\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n\n def test_login(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n clave = self.browser.find_element_by_id('password')\n clave.send_keys('prueba123')\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\" Logout\"]')\n self.assertIn('Logout', span.text)\n\n def test_Editar(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n claveIngreso = self.browser.find_element_by_id('password')\n claveIngreso.send_keys('prueba123')\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n self.browser.implicitly_wait(3)\n linkEditar = self.browser.find_element_by_id('id_editar')\n linkEditar.click()\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.clear()\n nombre.send_keys('Betzy Editado')\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.clear()\n apellidos.send_keys('Montanez Editado')\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.clear()\n experiencia.send_keys('10')\n self.browser.find_element_by_xpath(\n 
\"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\"\n ).click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.clear()\n telefono.send_keys('313555666')\n correo = self.browser.find_element_by_id('id_correo')\n correo.clear()\n correo.send_keys('[email protected]')\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\\\chromedriver\\\\developer.jpg')\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.clear()\n nombreUsuario.send_keys('ba.montanez2')\n clave = self.browser.find_element_by_id('id_password')\n clave.clear()\n clave.send_keys('prueba1234')\n botonGrabar = self.browser.find_element_by_id('id_editar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH,\n '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('[email protected]')\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n 
self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n correo = self.browser.find_element_by_id('buscar')\n correo.send_keys('Betzy Editado Montanez Editado')\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n span = self.browser.find_element(By.XPATH,\n '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n", "step-5": "__author__ = 'asistente'\n\n#from __future__ import absolute_import\n\nfrom unittest import TestCase\nfrom selenium import webdriver\n\nfrom selenium.webdriver.common.by import By\n\nclass FunctionalTest(TestCase):\n\n def setUp(self):\n self.browser = webdriver.Chrome(\"C:\\\\chromedriver\\\\chromedriver.exe\")\n self.browser.implicitly_wait(2)\n\n def tearDown(self):\n self.browser.quit()\n\n def test_title(self):\n self.browser.get('http://localhost:8000')\n self.assertIn('BuscoAyuda', self.browser.title)\n\n def test_registro(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_register')\n link.click()\n\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.send_keys('Rafael')\n\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.send_keys('Medrano')\n\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.send_keys('7')\n\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\").click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.send_keys('3135555555')\n\n correo = self.browser.find_element_by_id('id_correo')\n correo.send_keys('[email protected]')\n\n imagen = self.browser.find_element_by_id('id_imagen')\n 
imagen.send_keys('C:\\chromedriver\\developer.jpg')\n\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.send_keys('re.medrano')\n\n clave = self.browser.find_element_by_id('id_password')\n clave.send_keys('prueba123')\n\n botonGrabar = self.browser.find_element_by_id('id_grabar')\n botonGrabar.click()\n self.browser.implicitly_wait(3)\n\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Rafael Medrano\"]')\n self.assertIn('Rafael Medrano', span.text)\n\n def test_verDetalle(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH, '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n\n self.assertIn('Betzy Editado Montanez Editado', h2.text)\n\n def test_login(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n\n clave = self.browser.find_element_by_id('password')\n clave.send_keys('prueba123')\n\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\" Logout\"]')\n\n self.assertIn('Logout', span.text)\n\n\n def test_Editar(self):\n self.browser.get('http://localhost:8000')\n link = self.browser.find_element_by_id('id_login')\n link.click()\n\n nombreUsuario = self.browser.find_element_by_id('username')\n nombreUsuario.send_keys('ba.montanez')\n\n claveIngreso = self.browser.find_element_by_id('password')\n claveIngreso.send_keys('prueba123')\n\n botonIngresar = self.browser.find_element_by_id('id_ingresar')\n botonIngresar.click()\n\n self.browser.implicitly_wait(3)\n\n linkEditar = self.browser.find_element_by_id('id_editar')\n 
linkEditar.click()\n\n nombre = self.browser.find_element_by_id('id_nombre')\n nombre.clear()\n nombre.send_keys('Betzy Editado')\n\n apellidos = self.browser.find_element_by_id('id_apellidos')\n apellidos.clear()\n apellidos.send_keys('Montanez Editado')\n\n experiencia = self.browser.find_element_by_id('id_aniosExperiencia')\n experiencia.clear()\n experiencia.send_keys('10')\n\n self.browser.find_element_by_xpath(\n \"//select[@id='id_tiposDeServicio']/option[text()='Desarrollador Web']\").click()\n telefono = self.browser.find_element_by_id('id_telefono')\n telefono.clear()\n telefono.send_keys('313555666')\n\n correo = self.browser.find_element_by_id('id_correo')\n correo.clear()\n correo.send_keys('[email protected]')\n\n imagen = self.browser.find_element_by_id('id_imagen')\n imagen.send_keys('C:\\chromedriver\\developer.jpg')\n\n nombreUsuario = self.browser.find_element_by_id('id_username')\n nombreUsuario.clear()\n nombreUsuario.send_keys('ba.montanez2')\n\n clave = self.browser.find_element_by_id('id_password')\n clave.clear()\n clave.send_keys('prueba1234')\n\n botonGrabar = self.browser.find_element_by_id('id_editar')\n botonGrabar.click()\n\n self.browser.implicitly_wait(3)\n\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n def test_Comentar(self):\n self.browser.get('http://localhost:8000')\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n span.click()\n self.browser.implicitly_wait(3)\n h2 = self.browser.find_element(By.XPATH, '//h2[text()=\"Betzy Editado Montanez Editado\"]')\n\n correo = self.browser.find_element_by_id('correo')\n correo.send_keys('[email protected]')\n\n comentario = self.browser.find_element_by_id('comentario')\n comentario.send_keys('Comentario Prueba')\n\n botonAceptar = self.browser.find_element_by_id('id_comentar')\n botonAceptar.click()\n 
self.browser.implicitly_wait(6)\n\n span = self.browser.find_element(By.XPATH, '//p[text()=\"Comentario Prueba\"]')\n self.assertIn('Comentario Prueba', span.text)\n\n def test_listado(self):\n self.browser.get('http://localhost:8000')\n\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)\n\n self.browser.implicitly_wait(3)\n span = self.browser.find_element(By.XPATH, '//span[text()=\"pepito perez\"]')\n self.assertIn('pepito perez', span.text)\n\n def test_buscar(self):\n self.browser.get('http://localhost:8000')\n\n correo = self.browser.find_element_by_id('buscar')\n correo.send_keys('Betzy Editado Montanez Editado')\n\n botonBuscar = self.browser.find_element_by_id('id_buscar')\n botonBuscar.click()\n self.browser.implicitly_wait(6)\n\n span = self.browser.find_element(By.XPATH, '//span[text()=\"Betzy Editado Montanez Editado\"]')\n self.assertIn('Betzy Editado Montanez Editado', span.text)", "step-ids": [ 6, 7, 9, 13, 14 ] }
[ 6, 7, 9, 13, 14 ]
class Node:
    """A vertex of a directed, weighted graph.

    Fields:
        key: the node id.
        location: the node's position as a 3D point tuple (x, y, z), or None.
        ni_out: dict of outgoing "edges" from this node, as (dest key -> weight).
        ni_in: dict of incoming "edges" to this node, as (src key -> weight).
    """

    def __init__(self, k: int = None, loc: tuple = None, **kwargs):
        """Create a node with the given key and optional location.

        :param k: node id
        :param loc: position as a 3D point tuple, or None if unknown
        """
        self.__key = k
        self.__location = loc
        self.__ni_out = {}
        self.__ni_in = {}

    def add_neighbor_out(self, neighbor_id: int, weight: float) -> None:
        """
        Add "edge" that connected from this node (node_id ---> neighbor_id).
        :param neighbor_id: dest node key
        :param weight: edge's weight
        """
        self.__ni_out[neighbor_id] = weight

    def add_neighbor_in(self, neighbor_id: int, weight: float) -> None:
        """
        Add "edge" that connected to this node (neighbor_id ---> node_id).
        :param neighbor_id: src node key
        :param weight: edge's weight
        """
        self.__ni_in[neighbor_id] = weight

    def get_connections_out(self) -> dict:
        """
        Return the dictionary of outgoing edges from this node,
        each edge represented as a pair (dest key, edge weight).
        :return: dictionary (key, edge weight)
        """
        return self.__ni_out

    def get_connections_in(self) -> dict:
        """
        Return the dictionary of incoming edges to this node,
        each edge represented as a pair (src key, edge weight).
        :return: dictionary (key, edge weight)
        """
        return self.__ni_in

    def get_key(self) -> int:
        """
        Return this node's key.
        :return: key
        """
        return self.__key

    def get_location(self) -> tuple:
        """
        Return this node's location as a 3D point (x, y, z), or None.
        :return: this node's location
        """
        return self.__location

    def set_location(self, location: tuple) -> None:
        """
        Set this node's location.
        Used when loading/plotting graphs whose nodes have no position.
        :param location: the new position of this node
        """
        self.__location = location

    def as_dict_node(self):
        """
        Return the node as a dictionary {"pos": "x, y, z", "id": key}.
        The surrounding parentheses of the tuple repr are stripped.
        :return: the node as a dictionary
        """
        loc_as_str = str(self.get_location())
        m_dict = {"pos": loc_as_str[1:-1], "id": self.get_key()}
        return m_dict

    def as_dict_edge(self):
        """
        Return the outgoing edges as a list of dictionaries
        {"src": src node_id, "w": edge weight, "dest": dest node_id}.
        :return: list of edge dictionaries
        """
        l_list = []
        for k, v in self.get_connections_out().items():
            m_dict = {"src": int(self.get_key()), "w": float(v), "dest": int(k)}
            l_list.append(m_dict)
        return l_list

    def __repr__(self):
        return str([self.get_key()])

    def __str__(self) -> str:
        return "Node: id: " + str(self.__key) + ' neighbors: ' + str(self.__ni_out)

    def __eq__(self, o: object) -> bool:
        """Structural equality: same key, location, and edge dictionaries.

        Bug fix: the original called ``self.__location.__eq__(other.__location)``
        directly.  When the location is None, ``None.__eq__(tuple)`` returns
        ``NotImplemented`` — which is truthy — so nodes with different
        locations wrongly compared equal.  Plain ``==`` handles the
        NotImplemented protocol correctly.
        """
        if self is o:
            return True
        if o is None or self.__class__ is not o.__class__:
            return False
        return (self.__key == o.__key
                and self.__location == o.__location
                and self.__ni_in == o.__ni_in
                and self.__ni_out == o.__ni_out)
normal
{ "blob_id": "9c3f6c368c764918da5cce44da574b7c041fa414", "index": 1364, "step-1": "class Node:\n <mask token>\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) ->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n <mask token>\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes 
have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n <mask token>\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n __ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n", "step-2": "class Node:\n <mask token>\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n 
self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) ->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n <mask token>\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n 
__ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n", "step-3": "class Node:\n <mask token>\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) 
->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n\n def get_location(self) ->tuple:\n \"\"\"\n Return this node location as a 3DPoint (x, y, z).\n :return: this node location\n \"\"\"\n return self.__location\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n __ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n", "step-4": "class Node:\n \"\"\"\n This class represent a node (vertex).\n \"\"\"\n\n def __init__(self, k: int=None, loc: tuple=None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair 
(key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id ---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) ->dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) ->int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n\n def get_location(self) ->tuple:\n \"\"\"\n Return this node location as a 3DPoint (x, y, z).\n :return: this node location\n \"\"\"\n return self.__location\n\n def set_location(self, location: tuple) ->None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n 
\"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)\n }\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) ->str:\n return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.\n __ni_out)\n\n def __eq__(self, o: object) ->bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.\n __location) and self.__ni_in.__eq__(other.__ni_in\n ) and self.__ni_out.__eq__(other.__ni_out)\n", "step-5": "class Node:\n \"\"\"\n This class represent a node (vertex).\n \"\"\"\n\n def __init__(self, k: int = None, loc: tuple = None, **kwargs):\n \"\"\"\n Each node contain dew fields:\n key: node_id.\n location: node's position represent as 3DPoint.\n ni_out: a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n ni_in: a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight)\n \"\"\"\n self.__key = k\n self.__location = loc\n self.__ni_out = {}\n self.__ni_in = {}\n\n def add_neighbor_out(self, neighbor_id: int, weight: float) -> None:\n \"\"\"\n Add \"edge\" that connected from this node (node_id ---> neighbor_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_out[neighbor_id] = weight\n\n def add_neighbor_in(self, neighbor_id: int, weight: float) -> None:\n \"\"\"\n Add \"edge\" that connected to this node (neighbor_id 
---> node_id).\n :param neighbor_id: dest node key\n :param weight: edge's weight\n \"\"\"\n self.__ni_in[neighbor_id] = weight\n\n def get_connections_out(self) -> dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected from this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_out\n\n def get_connections_in(self) -> dict:\n \"\"\"\n Return a dictionary that holds all the \"edges\" that connected to this node,\n each edge is represented using a pair (key, edge weight).\n :return: dictionary (key, edge weight).\n \"\"\"\n return self.__ni_in\n\n def get_key(self) -> int:\n \"\"\"\n Return this node key.\n :return: key\n \"\"\"\n return self.__key\n\n def get_location(self) -> tuple:\n \"\"\"\n Return this node location as a 3DPoint (x, y, z).\n :return: this node location\n \"\"\"\n return self.__location\n\n def set_location(self, location: tuple) -> None:\n \"\"\"\n Allows to add location to this node.\n This method used for load and plot graphs that their nodes have no position.\n :param location: the new position of this node\n \"\"\"\n self.__location = location\n\n def as_dict_node(self):\n \"\"\"\n Return the node as dictionary {\"pos\": \"x\", \"y\", \"z\", \"id\": key}\n :return: the node as dictionary\n \"\"\"\n loc_as_str = str(self.get_location())\n m_dict = {\"pos\": loc_as_str[1:-1], \"id\": self.get_key()}\n return m_dict\n\n def as_dict_edge(self):\n \"\"\"\n Return the edge as dictionary {\"src\": src node_id, \"w\": edge weight, \"dest\": dest node_id}\n :return: the edge as dictionary\n \"\"\"\n l_list = []\n for k, v in self.get_connections_out().items():\n m_dict = {\"src\": int(self.get_key()), \"w\": float(v), \"dest\": int(k)}\n l_list.append(m_dict)\n return l_list\n\n def __repr__(self):\n return str([self.get_key()])\n\n def __str__(self) -> str:\n return \"Node: id: \" + str(self.__key) + ' neighbors: ' + str(self.__ni_out)\n\n 
def __eq__(self, o: object) -> bool:\n if self is o:\n return True\n if o is None or self.__class__ is not o.__class__:\n return False\n other = o\n return self.__key == other.__key and self.__location.__eq__(other.__location) and self.__ni_in.__eq__(\n other.__ni_in) and self.__ni_out.__eq__(other.__ni_out)", "step-ids": [ 12, 13, 14, 15, 16 ] }
[ 12, 13, 14, 15, 16 ]
from PyQt5 import QtCore, QtWidgets from .main_window_base import Ui_MainWindow from .custom_sort_filter_proxy_model import CustomSortFilterProxyModel from .tree_model import TreeModel model_filename = "widgets/default.txt" class MainWindow(Ui_MainWindow, QtCore.QObject): def __init__(self, qmain_window): super().__init__() self.setupUi(qmain_window) self._proxy_model = CustomSortFilterProxyModel(self) self._model = TreeModel(model_filename) self._proxy_model.setSourceModel(self._model) self.treeView.setModel(self._proxy_model) self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents) # Attach slot/signals self.filterPatternEdit.editingFinished.connect( lambda: self._proxy_model.update_filter_pattern(self.filterPatternEdit.text())) self.filterSyntaxComboBox.currentTextChanged.connect(self._proxy_model.update_filter_syntax) self.filterColumnComboBox.currentTextChanged.connect(self._proxy_model.update_filter_column) self.caseSensitiveFilterCB.stateChanged.connect( lambda state: self._proxy_model.update_case_sensitive_filter(state)) self.caseSensitiveSortingCB.stateChanged.connect( lambda state: self._proxy_model.update_case_sensitive_sort(state))
normal
{ "blob_id": "7a918518d8c9ff1184a634d1a5c799e735dfbc8a", "index": 1707, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n self.treeView.setModel(self._proxy_model)\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.\n ResizeToContents)\n self.filterPatternEdit.editingFinished.connect(lambda : self.\n _proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(lambda state: self.\n _proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(lambda state: self\n ._proxy_model.update_case_sensitive_sort(state))\n", "step-3": "<mask token>\nmodel_filename = 'widgets/default.txt'\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n self.treeView.setModel(self._proxy_model)\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.\n ResizeToContents)\n self.filterPatternEdit.editingFinished.connect(lambda : self.\n _proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(lambda state: 
self.\n _proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(lambda state: self\n ._proxy_model.update_case_sensitive_sort(state))\n", "step-4": "from PyQt5 import QtCore, QtWidgets\nfrom .main_window_base import Ui_MainWindow\nfrom .custom_sort_filter_proxy_model import CustomSortFilterProxyModel\nfrom .tree_model import TreeModel\nmodel_filename = 'widgets/default.txt'\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n self.treeView.setModel(self._proxy_model)\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.\n ResizeToContents)\n self.filterPatternEdit.editingFinished.connect(lambda : self.\n _proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self.\n _proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(lambda state: self.\n _proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(lambda state: self\n ._proxy_model.update_case_sensitive_sort(state))\n", "step-5": "\nfrom PyQt5 import QtCore, QtWidgets\n\nfrom .main_window_base import Ui_MainWindow\nfrom .custom_sort_filter_proxy_model import CustomSortFilterProxyModel\nfrom .tree_model import TreeModel\n\nmodel_filename = \"widgets/default.txt\"\n\n\nclass MainWindow(Ui_MainWindow, QtCore.QObject):\n\n def __init__(self, qmain_window):\n super().__init__()\n self.setupUi(qmain_window)\n\n self._proxy_model = CustomSortFilterProxyModel(self)\n self._model = TreeModel(model_filename)\n self._proxy_model.setSourceModel(self._model)\n 
self.treeView.setModel(self._proxy_model)\n\n self.treeView.header().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)\n\n # Attach slot/signals\n self.filterPatternEdit.editingFinished.connect(\n lambda: self._proxy_model.update_filter_pattern(self.filterPatternEdit.text()))\n self.filterSyntaxComboBox.currentTextChanged.connect(self._proxy_model.update_filter_syntax)\n self.filterColumnComboBox.currentTextChanged.connect(self._proxy_model.update_filter_column)\n self.caseSensitiveFilterCB.stateChanged.connect(\n lambda state: self._proxy_model.update_case_sensitive_filter(state))\n self.caseSensitiveSortingCB.stateChanged.connect(\n lambda state: self._proxy_model.update_case_sensitive_sort(state))\n", "step-ids": [ 0, 2, 3, 4, 5 ] }
[ 0, 2, 3, 4, 5 ]
import numpy as np import time # Create key based on timestamp KEY = time.time() np.random.seed(int(KEY)) # Read in message with open('Message.txt', 'r') as f: Message = f.read() f.close() # Generate vector of random integers Encoder = np.random.random_integers(300, size=len(Message)) # Map message to encoded array M = [] for i in range(len(Message)): M.append(ord(Message[i])*Encoder[i]) # Create or overwrite the file with the message with open('ENCODED.txt', 'w') as e: for m in M: e.write(str(m)+" ") # Create or overwrite the file with the key with open('KEY.txt', 'w') as f: f.write(str(KEY)) print "Your message has been encoded!"
normal
{ "blob_id": "b2f9a133581b5144b73a47f50a3b355d1112f7ea", "index": 4072, "step-1": "import numpy as np\nimport time\n\n# Create key based on timestamp\nKEY = time.time()\nnp.random.seed(int(KEY))\n\n# Read in message\nwith open('Message.txt', 'r') as f:\n\tMessage = f.read()\n\tf.close()\n\n# Generate vector of random integers\nEncoder = np.random.random_integers(300, size=len(Message))\n\n# Map message to encoded array\nM = []\nfor i in range(len(Message)):\n M.append(ord(Message[i])*Encoder[i])\n\n# Create or overwrite the file with the message\nwith open('ENCODED.txt', 'w') as e:\n for m in M:\n e.write(str(m)+\" \")\n\n# Create or overwrite the file with the key\nwith open('KEY.txt', 'w') as f:\n f.write(str(KEY))\n\nprint \"Your message has been encoded!\"", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from PyQt4.QtGui import QSystemTrayIcon, QApplication, QMenu, QIcon class SystemTrayIcon(QSystemTrayIcon): def __init__(self, parent=None): super(SystemTrayIcon, self).__init__(parent) self.set_icon_state(QIcon.Disabled) menu = QMenu(parent) self.exit_action = menu.addAction('E&xit') self.exit_action.triggered.connect(self.close_application) self.setContextMenu(menu) self.setToolTip(QApplication.instance().applicationName()) def close_application(self): self.parent().close() def set_icon_state(self, state): pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state) self.setIcon(QIcon(pixmap))
normal
{ "blob_id": "c6e315d7dd44b998f64eee079f2d8455ffecdc30", "index": 9931, "step-1": "<mask token>\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n <mask token>\n <mask token>\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n", "step-2": "<mask token>\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n\n def __init__(self, parent=None):\n super(SystemTrayIcon, self).__init__(parent)\n self.set_icon_state(QIcon.Disabled)\n menu = QMenu(parent)\n self.exit_action = menu.addAction('E&xit')\n self.exit_action.triggered.connect(self.close_application)\n self.setContextMenu(menu)\n self.setToolTip(QApplication.instance().applicationName())\n <mask token>\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n", "step-3": "<mask token>\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n\n def __init__(self, parent=None):\n super(SystemTrayIcon, self).__init__(parent)\n self.set_icon_state(QIcon.Disabled)\n menu = QMenu(parent)\n self.exit_action = menu.addAction('E&xit')\n self.exit_action.triggered.connect(self.close_application)\n self.setContextMenu(menu)\n self.setToolTip(QApplication.instance().applicationName())\n\n def close_application(self):\n self.parent().close()\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n", "step-4": "from PyQt4.QtGui import QSystemTrayIcon, QApplication, QMenu, QIcon\n\n\nclass SystemTrayIcon(QSystemTrayIcon):\n\n def __init__(self, parent=None):\n super(SystemTrayIcon, self).__init__(parent)\n self.set_icon_state(QIcon.Disabled)\n menu = QMenu(parent)\n self.exit_action = menu.addAction('E&xit')\n self.exit_action.triggered.connect(self.close_application)\n self.setContextMenu(menu)\n self.setToolTip(QApplication.instance().applicationName())\n\n def close_application(self):\n 
self.parent().close()\n\n def set_icon_state(self, state):\n pixmap = QApplication.instance().windowIcon().pixmap(256, 256, state)\n self.setIcon(QIcon(pixmap))\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
# 2. Отсортируйте по возрастанию методом слияния одномерный вещественный массив, # заданный случайными числами на промежутке [0; 50). # Выведите на экран исходный и отсортированный массивы. from random import randint # создаем массив [0, 50) случайных чисел size = 13 array = [randint(0, 50) for x in range(size)] print('*' * 30) print('Initial array:') print(array) print('*' * 30) def merge_sort(merged_arr: list): """ функция делит поданный на вход массив, и рекурсивно все сортирует слиянием :param merged_arr: - список на входе :return: - список отсортированный слиянием на выходе """ # если массив единичный, то "приехали" if len(merged_arr) <= 1: return # разбиваем начальный массив на левую и правую части middle = len(merged_arr) // 2 left = merged_arr[:middle] right = merged_arr[middle:] # рекуррентно их сортируем merge_sort(left) merge_sort(right) # "сливаем" левую и правые части comb_arr = merge(left, right) for i in range(len(merged_arr)): merged_arr[i] = comb_arr[i] return merged_arr def merge(merge_1: list, merge_2: list): """ Функция собирает из двух предварительно отсортированных массивов, поданных на вход, один и ео же возвращает :param merge_1: - первый отсортированный список :param merge_2: - второй отсортированный список :return: - "слитый" из двух, отсортированный список """ # заполняем дополнительный массив С нулями merged_arr = [0] * (len(merge_1) + len(merge_2)) # объявляем и обнуляем счетчики i = k = n = 0 # разбираем в С из А или В меньший элемент, пока какой-то из А или В не закончится while i < len(merge_1) and k < len(merge_2): if merge_1[i] <= merge_2[k]: merged_arr[n] = merge_1[i] i += 1 n += 1 else: merged_arr[n] = merge_2[k] k += 1 n += 1 # докладываем в С остатки из А или В - где осталось. while i < len(merge_1): merged_arr[n] = merge_1[i] i += 1 n += 1 while k < len(merge_2): merged_arr[n] = merge_2[k] k += 1 n += 1 return merged_arr print('Merge sorted array:') print(merge_sort(array)) print('*' * 30)
normal
{ "blob_id": "cd1987f09ca3e09ac251b1ebdec4168fd5dbdd0e", "index": 7607, "step-1": "<mask token>\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\n<mask token>\n", "step-2": "<mask token>\nprint('*' * 30)\nprint('Initial array:')\nprint(array)\nprint('*' * 30)\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef 
merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\nprint('Merge sorted array:')\nprint(merge_sort(array))\nprint('*' * 30)\n", "step-3": "<mask token>\nsize = 13\narray = [randint(0, 50) for x in range(size)]\nprint('*' * 30)\nprint('Initial array:')\nprint(array)\nprint('*' * 30)\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 
1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\nprint('Merge sorted array:')\nprint(merge_sort(array))\nprint('*' * 30)\n", "step-4": "from random import randint\nsize = 13\narray = [randint(0, 50) for x in range(size)]\nprint('*' * 30)\nprint('Initial array:')\nprint(array)\nprint('*' * 30)\n\n\ndef merge_sort(merged_arr: list):\n \"\"\"\n функция делит поданный на вход массив,\n и рекурсивно все сортирует слиянием\n :param merged_arr: - список на входе\n :return: - список отсортированный слиянием на выходе\n \"\"\"\n if len(merged_arr) <= 1:\n return\n middle = len(merged_arr) // 2\n left = merged_arr[:middle]\n right = merged_arr[middle:]\n merge_sort(left)\n merge_sort(right)\n comb_arr = merge(left, right)\n for i in range(len(merged_arr)):\n merged_arr[i] = comb_arr[i]\n return merged_arr\n\n\ndef merge(merge_1: list, merge_2: list):\n \"\"\"\n Функция собирает из двух предварительно отсортированных массивов,\n поданных на вход, один и ео же возвращает\n :param merge_1: - первый отсортированный список\n :param merge_2: - второй отсортированный список\n :return: - \"слитый\" из двух, отсортированный список\n \"\"\"\n merged_arr = [0] * (len(merge_1) + len(merge_2))\n i = k = n = 0\n while i < len(merge_1) and k < len(merge_2):\n if merge_1[i] <= merge_2[k]:\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n else:\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n while i < len(merge_1):\n merged_arr[n] = merge_1[i]\n i += 1\n n += 1\n while k < len(merge_2):\n merged_arr[n] = merge_2[k]\n k += 1\n n += 1\n return merged_arr\n\n\nprint('Merge sorted array:')\nprint(merge_sort(array))\nprint('*' * 30)\n", "step-5": "# 2. 
Отсортируйте по возрастанию методом слияния одномерный вещественный массив,\r\n# заданный случайными числами на промежутке [0; 50).\r\n# Выведите на экран исходный и отсортированный массивы.\r\n\r\nfrom random import randint\r\n\r\n# создаем массив [0, 50) случайных чисел\r\n\r\nsize = 13\r\narray = [randint(0, 50) for x in range(size)]\r\n\r\nprint('*' * 30)\r\nprint('Initial array:')\r\nprint(array)\r\nprint('*' * 30)\r\n\r\n\r\ndef merge_sort(merged_arr: list):\r\n \"\"\"\r\n функция делит поданный на вход массив,\r\n и рекурсивно все сортирует слиянием\r\n :param merged_arr: - список на входе\r\n :return: - список отсортированный слиянием на выходе\r\n \"\"\"\r\n # если массив единичный, то \"приехали\"\r\n if len(merged_arr) <= 1:\r\n return\r\n # разбиваем начальный массив на левую и правую части\r\n middle = len(merged_arr) // 2\r\n left = merged_arr[:middle]\r\n right = merged_arr[middle:]\r\n # рекуррентно их сортируем\r\n merge_sort(left)\r\n merge_sort(right)\r\n # \"сливаем\" левую и правые части\r\n comb_arr = merge(left, right)\r\n for i in range(len(merged_arr)):\r\n merged_arr[i] = comb_arr[i]\r\n return merged_arr\r\n\r\n\r\ndef merge(merge_1: list, merge_2: list):\r\n \"\"\"\r\n Функция собирает из двух предварительно отсортированных массивов,\r\n поданных на вход, один и ео же возвращает\r\n :param merge_1: - первый отсортированный список\r\n :param merge_2: - второй отсортированный список\r\n :return: - \"слитый\" из двух, отсортированный список\r\n \"\"\"\r\n # заполняем дополнительный массив С нулями\r\n merged_arr = [0] * (len(merge_1) + len(merge_2))\r\n # объявляем и обнуляем счетчики\r\n i = k = n = 0\r\n # разбираем в С из А или В меньший элемент, пока какой-то из А или В не закончится\r\n while i < len(merge_1) and k < len(merge_2):\r\n if merge_1[i] <= merge_2[k]:\r\n merged_arr[n] = merge_1[i]\r\n i += 1\r\n n += 1\r\n else:\r\n merged_arr[n] = merge_2[k]\r\n k += 1\r\n n += 1\r\n # докладываем в С остатки из А или В - где 
осталось.\r\n while i < len(merge_1):\r\n merged_arr[n] = merge_1[i]\r\n i += 1\r\n n += 1\r\n while k < len(merge_2):\r\n merged_arr[n] = merge_2[k]\r\n k += 1\r\n n += 1\r\n return merged_arr\r\n\r\n\r\nprint('Merge sorted array:')\r\nprint(merge_sort(array))\r\nprint('*' * 30)\r\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
# -*- coding: utf-8 -*- # Define here the models for your scraped items # # See documentation in: # http://doc.scrapy.org/en/latest/topics/items.html import scrapy class CnnArticleItem(scrapy.Item): title = scrapy.Field() developments = scrapy.Field() body = scrapy.Field() date = scrapy.Field() class GoogleArticleItem(scrapy.Item): title = scrapy.Field() date = scrapy.Field() snippet = scrapy.Field() source = scrapy.Field()
normal
{ "blob_id": "cf0eb9685cdfc412871d3b36270ddab3e520bb8f", "index": 104, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass CnnArticleItem(scrapy.Item):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n", "step-3": "<mask token>\n\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n", "step-4": "import scrapy\n\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()\n", "step-5": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\n\nclass CnnArticleItem(scrapy.Item):\n title = scrapy.Field()\n developments = scrapy.Field()\n body = scrapy.Field()\n date = scrapy.Field()\n\nclass GoogleArticleItem(scrapy.Item):\n title = scrapy.Field()\n date = scrapy.Field()\n snippet = scrapy.Field()\n source = scrapy.Field()", "step-ids": [ 0, 3, 4, 5, 6 ] }
[ 0, 3, 4, 5, 6 ]
import pymysql import pymssql import socket import threading from time import sleep address = ('127.0.0.1', 20176) usermode = {1: 'Wangcz_Students', 2: 'Wangcz_Teachers', 3: 'Wangcz_Admin' } def checkuser(username, password, cursor, user_db): cursor.execute('''select * from %s WHERE username = %d AND password = %d''' % (user_db, int(username), int(password))) return cursor.fetchall() def tcplink(sock, addr): conn = pymysql.connect() cursor = conn.cursor() while True: bytedata = sock.recv(1024) data = eval(bytedata.decode()) sleep(1) if data: if 'username' and 'password' and 'login_mode' in data.keys(): if checkuser(data['username'],data['password'],cursor=cursor, user_db=usermode[data[login_mode]]): sock.send(b'Login success')#登陆成功 else: sock.send(b'Error')#发送错误消息 else: break sock.close() if __name__ == '__main__': s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(address) s.listen(10) while True: sock,addr = s.accept() t = threading.Thread(target=tcplink,args=(sock,addr))
normal
{ "blob_id": "758e5b9a65132c4bdee4600e79c27f9c0f272312", "index": 8308, "step-1": "<mask token>\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=tcplink, args=(sock, addr))\n", "step-3": "<mask token>\naddress = '127.0.0.1', 20176\nusermode = {(1): 'Wangcz_Students', (2): 'Wangcz_Teachers', (3): 'Wangcz_Admin'\n }\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return 
cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=tcplink, args=(sock, addr))\n", "step-4": "import pymysql\nimport pymssql\nimport socket\nimport threading\nfrom time import sleep\naddress = '127.0.0.1', 20176\nusermode = {(1): 'Wangcz_Students', (2): 'Wangcz_Teachers', (3): 'Wangcz_Admin'\n }\n\n\ndef checkuser(username, password, cursor, user_db):\n cursor.execute('select * from %s WHERE username = %d AND password = %d' %\n (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'], data['password'], cursor=\n cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')\n else:\n sock.send(b'Error')\n else:\n break\n sock.close()\n\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock, addr = s.accept()\n t = threading.Thread(target=tcplink, args=(sock, addr))\n", "step-5": "import pymysql\nimport pymssql\nimport socket\nimport threading\nfrom time import sleep\n\naddress = ('127.0.0.1', 20176)\nusermode = {1: 'Wangcz_Students',\n 2: 'Wangcz_Teachers',\n 3: 'Wangcz_Admin'\n }\n\ndef 
checkuser(username, password, cursor, user_db):\n\n cursor.execute('''select * from %s WHERE username = %d AND password = %d''' % (user_db, int(username), int(password)))\n return cursor.fetchall()\n\n\ndef tcplink(sock, addr):\n conn = pymysql.connect()\n cursor = conn.cursor()\n while True:\n bytedata = sock.recv(1024)\n data = eval(bytedata.decode())\n sleep(1)\n if data:\n if 'username' and 'password' and 'login_mode' in data.keys():\n if checkuser(data['username'],data['password'],cursor=cursor, user_db=usermode[data[login_mode]]):\n sock.send(b'Login success')#登陆成功\n else:\n sock.send(b'Error')#发送错误消息\n else:\n break\n\n sock.close()\n\nif __name__ == '__main__':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(address)\n s.listen(10)\n while True:\n sock,addr = s.accept()\n t = threading.Thread(target=tcplink,args=(sock,addr))", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from flask_wtf import FlaskForm from wtforms import StringField, DateField, DecimalField class HoursForm(FlaskForm): date = StringField("Date") begins = DecimalField("Begins") ends = DecimalField("Ends") class Meta: csrf = False
normal
{ "blob_id": "b1a808e76008edec02d37ec596461e3a00a1d349", "index": 4553, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass HoursForm(FlaskForm):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n csrf = False\n", "step-3": "<mask token>\n\n\nclass HoursForm(FlaskForm):\n date = StringField('Date')\n begins = DecimalField('Begins')\n ends = DecimalField('Ends')\n\n\n class Meta:\n csrf = False\n", "step-4": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, DateField, DecimalField\n\n\nclass HoursForm(FlaskForm):\n date = StringField('Date')\n begins = DecimalField('Begins')\n ends = DecimalField('Ends')\n\n\n class Meta:\n csrf = False\n", "step-5": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, DateField, DecimalField\n\nclass HoursForm(FlaskForm):\n date = StringField(\"Date\")\n begins = DecimalField(\"Begins\")\n ends = DecimalField(\"Ends\")\n \n class Meta:\n csrf = False\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python import sys def get_params(fname): d = dict() with open(fname) as f: for line in f: l = line.strip() if (line[0] == '#'): continue param = line.split('=') v = ' '.join(param[1:]) d[param[0]] = v.strip('\n') return d usage_text = "Compares boot configs of two kernels\n" \ "Usage: {0} <filename1> <filename2>".format(sys.argv[0]) try: f1 = sys.argv[1] f2 = sys.argv[2] except: print usage_text exit() params1 = get_params(f1) params2 = get_params(f2) param_names = set([key for key in params1]) | set([key for key in params2]) the_first = True f_output = "{0:80}{1:40}{2:40}" for param in param_names: try: val1 = params1[param] except KeyError: val1 = '-' try: val2 = params2[param] except KeyError: val2 = '-' if (val1 != val2): if the_first: print(f_output.format("Param name", f1, f2)) print "-"*140 the_first = False print (f_output.format(param, val1, val2))
normal
{ "blob_id": "d287a5128ca9352b2edc459c9e42a57ef800ec9c", "index": 7657, "step-1": "#!/usr/bin/python\n\nimport sys\n\ndef get_params(fname):\n\td = dict()\n\twith open(fname) as f:\n\t\tfor line in f:\n\t\t\tl = line.strip()\n\t\t\tif (line[0] == '#'):\n\t\t\t\tcontinue\n\t\t\tparam = line.split('=')\n\t\t\tv = ' '.join(param[1:])\n\t\t\td[param[0]] = v.strip('\\n') \n\treturn d\n\nusage_text = \"Compares boot configs of two kernels\\n\" \\\n\t\"Usage: {0} <filename1> <filename2>\".format(sys.argv[0])\ntry:\n\tf1 = sys.argv[1]\n\tf2 = sys.argv[2]\nexcept:\n\tprint usage_text\n\texit()\n\nparams1 = get_params(f1)\nparams2 = get_params(f2)\n\nparam_names = set([key for key in params1]) | set([key for key in params2])\n\n\nthe_first = True\nf_output = \"{0:80}{1:40}{2:40}\"\n\nfor param in param_names:\n\ttry:\n\t\tval1 = params1[param]\n\texcept KeyError:\n\t\tval1 = '-'\n\t\t\n\ttry:\n\t\tval2 = params2[param]\n\texcept KeyError:\n\t\tval2 = '-'\n\n\tif (val1 != val2):\n\t\tif the_first:\n\t\t\tprint(f_output.format(\"Param name\", f1, f2))\n\t\t\tprint \"-\"*140\n\t\t\tthe_first = False\n\n\t\tprint (f_output.format(param, val1, val2))\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from sys import exit # Outside def outside(): print """ Oyoiyoi ... The train isn't running due to the HVV being complete crap. Well, the Rewe around the corner is still open and there's a HSV bar around the corner. You want to get your drank on, right now! Where do you go? """ choice = raw_input("> ") if choice == "Rewe": print "Off to Rewe!" Rewe() elif choice == "HSV bar": print "Off to the HSV bar!" bar_sober() else: die() # Outsid - Rewe def Rewe(): print """ Ohhaohaoha...Rewe ist packed! ..and you're still sober! What is this, gradeschool? Forget about the beer, just get in line at the register and grab whatever hard liquor is at kids' reach (way to go Germany..). \n \n Alrgiht, now you're back outside and your nipps are about to freeze off! Where are you going to go now? Time for the HSV bar, or are you done with the world and want to just go home? """ choice = raw_input("> ") if choice == "HSV bar": print "To the HSV bar!" way_bar() elif choice == "go home": print "Homeward bound!" way_inside() else: die() # Outside - Rewe - way to the bar def way_bar(): print """ You managed to grab a box of some good ol' German Schnapps! These 12 little babies are gonna get you in the right kind of mood. You have about a 5 to 10 minute walk ahead of you.. How many mini bottles of goodness will you gulp down on the way? """ choice = raw_input("> ") how_much = int(choice) if how_much < 3: bar_sober() elif 6 > how_much >= 3: bar_buzzed() else: loose(""" Well, I mean you did want to get wierd tonight, but this just escalated far too quickly! You need to get ahold of yourself! Now you've thrown your cookies all over the sidewalk..Though I am a bit proud of you, you better just go home and sleep..if you can find your way. """) # Outside - Rewe - way back home def way_inside(): print """ You managed to grab a box of some good ol' German Schnapps! These 12 little babies are gonna get you in the right kind of mood. You have about a 5 to 10 minute walk ahead of you.. 
How many mini bottles of goodness will you gulp down on the way? """ choice = raw_input("> ") how_much = int(choice) if how_much < 3: inside_sober() elif 6 > how_much >= 3: inside_buzzed() else: loose(""" Well, I mean you did want to get wierd tonight, but this just escalated far too quickly! You need to get ahold of yourself! Now you've thrown your cookies all over the sidewalk..Though I am a bit proud of you, you better just go home and sleep..if you can find your way. """) # Outside - Rewe - Inside(buzzed) def inside_buzzed(): print """ Now you're a little buzzed and all warm in your humble abode! You could kick it here with ya bad self, or ask if some peeps want to come over. Do you want to invite people to come get wrecked with you? """ choice = raw_input("> ") if choice == "yes": print "Okay, let's get this party goin'!" inside_buzzed_invite() elif choice == "no": print "There's only enough liquor for numero uno." inside_buzzed_alone() else: die() # Outside - Rewe - Inside(buzzed) Invite def inside_buzzed_invite(): print """ Congrats.. Both of your two friends are busy. Well, so much for trying to be social! Guess you'll be hanging out alone after all. """ inside_buzzed_alone() # Outside - Rewe - Inside(buzzed) ALone def inside_buzzed_alone(): print """ Now you're a little buzzed and all warm in your humble abode! Time to watch episodes of 'Intervention' and drink everytime someone makes a worse life choice than you have! """ win("Yay for not being at the very bottom!") # Inside (sober) def inside_sober(): print """ Alright alright alright. You could kick it here with ya bad self, or ask if some peeps want to come over. Do you want to invite people to come get wrecked with you? """ choice = raw_input("> ") if choice == "yes": print "It'll be nice to have some social interaction." inside_sober_invite() elif choice == "no": print "Ew gross, people are icky." 
inside_sober_alone() else: die() # Inside (sober) invite def inside_sober_invite(): print """ Wow you're feeling socially acceptable today! Three people are now at your place and you don't have much alcohol. Way to go, you anti-social worm. You're not drunk enough to be entertaining! You forgot you can't handle being responsible for social encounters. Akwardness. Overwhelms. You! """ loose("You're an anxious mess.") # Inside(sober) - Alone def inside_sober_alone(): print """ Wohoo! Time to drink what you've got and play some sweet video games until your eyes bleed! Who needs other people to enjoy themselves? Being socially awkward rules! And the best part is: You don't have to wear pants! """ win("This is the (anti-social) life!") # Outside - Rewe - bar(buzzed) def bar_buzzed(): print """ On the way to the bar, you see the disco lights flashing and you can here the German Schlager music being accompanied by the voices of old people. Nice. The few bottles of liquor you drank are kicking in just in time! You've consumed the perfect amount for this kind of thing! Once you get in the bar everyone cheers, even though you don't know them! Some old lady is celebrating the death of her husband and buying rounds for everyone. """ win("You hit the party jackpot!") # Outside - Bar(sober) def bar_sober(): print """ So now you're inside, and people seem to be having a good time. The problem is: they are drunk; you are not! You then realize that you can't pay with card here and you don't have enough cash for a drink.. Even if you brought booze with you, you wouldn't be able to drink it in here. Way to go.. Because you're too sober to be socially acceptable, you can't find the courage to ask the people celebrating if you can join. """ loose("You're uncomfortable and go home as the anxious mess that alaways had been.") # End of game, added to the variable of "why" def win(why): print why, " Bitchin'." exit(0) def loose(why): print why, " Laaame.." 
exit (0) def die(): print """ How dare you think out of the box?! You die sober!! """ # Begining of game def start(): print """ It's Friday night and you want to get hammered! Do you want to go out or stay home? """ choice = raw_input("> ") if choice == "out": outside() elif choice == "stay home": inside_sober() else: die() start()
normal
{ "blob_id": "b3bace532f687edc966c6aef5f454bde9367204f", "index": 4500, "step-1": "from sys import exit\n\n# Outside\ndef outside():\n print \"\"\"\n Oyoiyoi ... The train isn't running due to the HVV being complete crap.\n Well, the Rewe around the corner is still open\n and there's a HSV bar around the corner.\n You want to get your drank on, right now!\n Where do you go?\n \"\"\"\n choice = raw_input(\"> \")\n if choice == \"Rewe\":\n print \"Off to Rewe!\"\n Rewe()\n elif choice == \"HSV bar\":\n print \"Off to the HSV bar!\"\n bar_sober()\n else:\n die()\n\n# Outsid - Rewe\ndef Rewe():\n print \"\"\"\n Ohhaohaoha...Rewe ist packed!\n ..and you're still sober!\n What is this, gradeschool?\n Forget about the beer, just get in line at the register\n and grab whatever hard liquor is at kids' reach\n (way to go Germany..). \\n\n \\n\n Alrgiht, now you're back outside\n and your nipps are about to freeze off!\n Where are you going to go now?\n Time for the HSV bar,\n or are you done with the world and want to just go home?\n \"\"\"\n choice = raw_input(\"> \")\n if choice == \"HSV bar\":\n print \"To the HSV bar!\"\n way_bar()\n elif choice == \"go home\":\n print \"Homeward bound!\"\n way_inside()\n else:\n die()\n\n# Outside - Rewe - way to the bar\ndef way_bar():\n print \"\"\"\n You managed to grab a box of some good ol' German Schnapps!\n These 12 little babies are gonna get you in the right kind of mood.\n You have about a 5 to 10 minute walk ahead of you..\n How many mini bottles of goodness will you gulp down on the way?\n \"\"\"\n choice = raw_input(\"> \")\n how_much = int(choice)\n\n if how_much < 3:\n bar_sober()\n elif 6 > how_much >= 3:\n bar_buzzed()\n else:\n loose(\"\"\"\n Well, I mean you did want to get wierd tonight,\n but this just escalated far too quickly! You need\n to get ahold of yourself! 
Now you've thrown your cookies\n all over the sidewalk..Though I am a bit proud of you, you better just\n go home and sleep..if you can find your way.\n \"\"\")\n\n# Outside - Rewe - way back home\ndef way_inside():\n print \"\"\"\n You managed to grab a box of some good ol' German Schnapps!\n These 12 little babies are gonna get you in the right kind of mood.\n You have about a 5 to 10 minute walk ahead of you..\n How many mini bottles of goodness will you gulp down on the way?\n \"\"\"\n choice = raw_input(\"> \")\n how_much = int(choice)\n\n if how_much < 3:\n inside_sober()\n elif 6 > how_much >= 3:\n inside_buzzed()\n else:\n loose(\"\"\"\n Well, I mean you did want to get wierd tonight,\n but this just escalated far too quickly! You need\n to get ahold of yourself! Now you've thrown your cookies\n all over the sidewalk..Though I am a bit proud of you, you better just\n go home and sleep..if you can find your way.\n \"\"\")\n\n# Outside - Rewe - Inside(buzzed)\ndef inside_buzzed():\n print \"\"\"\n Now you're a little buzzed and all warm in your humble abode!\n You could kick it here with ya bad self, or ask if some peeps want to\n come over.\n Do you want to invite people to come get wrecked with you?\n \"\"\"\n choice = raw_input(\"> \")\n\n if choice == \"yes\":\n print \"Okay, let's get this party goin'!\"\n inside_buzzed_invite()\n elif choice == \"no\":\n print \"There's only enough liquor for numero uno.\"\n inside_buzzed_alone()\n else:\n die()\n\n\n# Outside - Rewe - Inside(buzzed) Invite\ndef inside_buzzed_invite():\n print \"\"\"\n Congrats..\n Both of your two friends are busy.\n Well, so much for trying to be social!\n Guess you'll be hanging out alone after all.\n \"\"\"\n inside_buzzed_alone()\n\n# Outside - Rewe - Inside(buzzed) ALone\ndef inside_buzzed_alone():\n print \"\"\"\n Now you're a little buzzed and all warm in your humble abode!\n Time to watch episodes of 'Intervention'\n and drink everytime someone makes a worse life choice\n than 
you have!\n \"\"\"\n win(\"Yay for not being at the very bottom!\")\n\n# Inside (sober)\ndef inside_sober():\n print \"\"\"\n Alright alright alright.\n You could kick it here with ya bad self, or ask if some peeps want to\n come over.\n Do you want to invite people to come get wrecked with you?\n \"\"\"\n choice = raw_input(\"> \")\n\n if choice == \"yes\":\n print \"It'll be nice to have some social interaction.\"\n inside_sober_invite()\n elif choice == \"no\":\n print \"Ew gross, people are icky.\"\n inside_sober_alone()\n else:\n die()\n\n# Inside (sober) invite\ndef inside_sober_invite():\n print \"\"\"\n Wow you're feeling socially acceptable today!\n Three people are now at your place and you don't have much alcohol.\n Way to go, you anti-social worm.\n You're not drunk enough to be entertaining!\n You forgot you can't handle being responsible for social encounters.\n Akwardness.\n Overwhelms.\n You!\n \"\"\"\n loose(\"You're an anxious mess.\")\n\n# Inside(sober) - Alone\ndef inside_sober_alone():\n print \"\"\"\n Wohoo! 
Time to drink what you've got and play some sweet video games until your eyes bleed!\n Who needs other people to enjoy themselves?\n Being socially awkward rules!\n And the best part is:\n You don't have to wear pants!\n \"\"\"\n win(\"This is the (anti-social) life!\")\n\n\n# Outside - Rewe - bar(buzzed)\ndef bar_buzzed():\n print \"\"\"\n On the way to the bar, you see the disco lights flashing\n and you can here the German Schlager music being accompanied\n by the voices of old people.\n Nice.\n The few bottles of liquor you drank are kicking in just in time!\n You've consumed the perfect amount for this kind of thing!\n Once you get in the bar everyone cheers, even though you don't know them!\n Some old lady is celebrating the death of her husband and buying rounds\n for everyone.\n \"\"\"\n win(\"You hit the party jackpot!\")\n\n# Outside - Bar(sober)\ndef bar_sober():\n print \"\"\"\n So now you're inside, and people seem to be having a good time.\n The problem is: they are drunk; you are not!\n You then realize that you can't pay with card here\n and you don't have enough cash for a drink..\n Even if you brought booze with you, you wouldn't be able to\n drink it in here. Way to go..\n Because you're too sober to be socially acceptable, you can't\n find the courage to ask the people celebrating if you can join.\n \"\"\"\n loose(\"You're uncomfortable and go home as the anxious mess that alaways had been.\")\n\n\n# End of game, added to the variable of \"why\"\ndef win(why):\n print why, \" Bitchin'.\"\n exit(0)\n\ndef loose(why):\n print why, \" Laaame..\"\n exit (0)\n\ndef die():\n print \"\"\"\n How dare you think out of the box?! 
You die sober!!\n \"\"\"\n\n\n# Begining of game\ndef start():\n print \"\"\"\n It's Friday night and you want to get hammered!\n Do you want to go out or stay home?\n \"\"\"\n choice = raw_input(\"> \")\n\n if choice == \"out\":\n outside()\n elif choice == \"stay home\":\n inside_sober()\n else:\n die()\n\nstart()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
""" Make sure overwriting read-only files works as expected (via win-tool). """ import TestGyp import filecmp import os import stat import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['ninja']) os.makedirs('subdir') read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C'] for f in read_only_files: test.write(f, 'source_contents') test.chmod(f, stat.S_IREAD) if os.access(f, os.W_OK): test.fail_test() os.makedirs(test.built_file_path('dest/subdir')) for f in read_only_files: f = os.path.join('dest', f) test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN') test.chmod(test.built_file_path(f), stat.S_IREAD) if os.access(test.built_file_path(f), os.W_OK): test.fail_test() test.run_gyp('copies_readonly_files.gyp') test.build('copies_readonly_files.gyp') for f in read_only_files: f = os.path.join('dest', f) test.must_contain(test.built_file_path(f), 'source_contents') for f in read_only_files: if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))): test.fail_test() test.pass_test()
normal
{ "blob_id": "efe5921afb160b7b5a953cdd0c2f90f64b5f34c9", "index": 5975, "step-1": "<mask token>\n", "step-2": "<mask token>\nif sys.platform == 'win32':\n test = TestGyp.TestGyp(formats=['ninja'])\n os.makedirs('subdir')\n read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']\n for f in read_only_files:\n test.write(f, 'source_contents')\n test.chmod(f, stat.S_IREAD)\n if os.access(f, os.W_OK):\n test.fail_test()\n os.makedirs(test.built_file_path('dest/subdir'))\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')\n test.chmod(test.built_file_path(f), stat.S_IREAD)\n if os.access(test.built_file_path(f), os.W_OK):\n test.fail_test()\n test.run_gyp('copies_readonly_files.gyp')\n test.build('copies_readonly_files.gyp')\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.must_contain(test.built_file_path(f), 'source_contents')\n for f in read_only_files:\n if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):\n test.fail_test()\n test.pass_test()\n", "step-3": "<mask token>\nimport TestGyp\nimport filecmp\nimport os\nimport stat\nimport sys\nif sys.platform == 'win32':\n test = TestGyp.TestGyp(formats=['ninja'])\n os.makedirs('subdir')\n read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']\n for f in read_only_files:\n test.write(f, 'source_contents')\n test.chmod(f, stat.S_IREAD)\n if os.access(f, os.W_OK):\n test.fail_test()\n os.makedirs(test.built_file_path('dest/subdir'))\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')\n test.chmod(test.built_file_path(f), stat.S_IREAD)\n if os.access(test.built_file_path(f), os.W_OK):\n test.fail_test()\n test.run_gyp('copies_readonly_files.gyp')\n test.build('copies_readonly_files.gyp')\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.must_contain(test.built_file_path(f), 'source_contents')\n for f in 
read_only_files:\n if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):\n test.fail_test()\n test.pass_test()\n", "step-4": "\n\n\n\n\n\n\"\"\"\nMake sure overwriting read-only files works as expected (via win-tool).\n\"\"\"\n\nimport TestGyp\n\nimport filecmp\nimport os\nimport stat\nimport sys\n\nif sys.platform == 'win32':\n test = TestGyp.TestGyp(formats=['ninja'])\n\n \n os.makedirs('subdir')\n read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']\n for f in read_only_files:\n test.write(f, 'source_contents')\n test.chmod(f, stat.S_IREAD)\n if os.access(f, os.W_OK):\n test.fail_test()\n\n \n \n \n os.makedirs(test.built_file_path('dest/subdir'))\n for f in read_only_files:\n f = os.path.join('dest', f)\n test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')\n test.chmod(test.built_file_path(f), stat.S_IREAD)\n \n if os.access(test.built_file_path(f), os.W_OK):\n test.fail_test()\n\n test.run_gyp('copies_readonly_files.gyp')\n test.build('copies_readonly_files.gyp')\n\n \n for f in read_only_files:\n f = os.path.join('dest', f)\n test.must_contain(test.built_file_path(f), 'source_contents')\n\n \n for f in read_only_files:\n if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):\n test.fail_test()\n\n test.pass_test()\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
input_object = open("input.txt", "r") input_data = input_object.readlines() input_object.close() cleaned_data = [] for line in input_data: cleaned_data.append(int(line.strip())) input_size = len(cleaned_data) for i in range(0, input_size): for j in range(i, input_size): for k in range(j, input_size): if cleaned_data[i] + cleaned_data[j] + cleaned_data[k] == 2020: ans = cleaned_data[i]*cleaned_data[j]*cleaned_data[k] print(ans) break
normal
{ "blob_id": "72f3ae476581ff5acd6c7101764f4764285a47bd", "index": 4426, "step-1": "<mask token>\n", "step-2": "<mask token>\ninput_object.close()\n<mask token>\nfor line in input_data:\n cleaned_data.append(int(line.strip()))\n<mask token>\nfor i in range(0, input_size):\n for j in range(i, input_size):\n for k in range(j, input_size):\n if cleaned_data[i] + cleaned_data[j] + cleaned_data[k] == 2020:\n ans = cleaned_data[i] * cleaned_data[j] * cleaned_data[k]\n print(ans)\n break\n", "step-3": "input_object = open('input.txt', 'r')\ninput_data = input_object.readlines()\ninput_object.close()\ncleaned_data = []\nfor line in input_data:\n cleaned_data.append(int(line.strip()))\ninput_size = len(cleaned_data)\nfor i in range(0, input_size):\n for j in range(i, input_size):\n for k in range(j, input_size):\n if cleaned_data[i] + cleaned_data[j] + cleaned_data[k] == 2020:\n ans = cleaned_data[i] * cleaned_data[j] * cleaned_data[k]\n print(ans)\n break\n", "step-4": "input_object = open(\"input.txt\", \"r\")\ninput_data = input_object.readlines()\ninput_object.close()\ncleaned_data = []\n\nfor line in input_data:\n cleaned_data.append(int(line.strip()))\ninput_size = len(cleaned_data)\n\n\nfor i in range(0, input_size):\n for j in range(i, input_size):\n for k in range(j, input_size):\n if cleaned_data[i] + cleaned_data[j] + cleaned_data[k] == 2020:\n ans = cleaned_data[i]*cleaned_data[j]*cleaned_data[k]\n print(ans)\n break", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/python # Point of origin (connector J3, pad 1, net 3V3) x = 0.0 y = 0.0 drillDiameter = 1.0 padWidth = 1.6 from os.path import exists from pad import * filename="iCEstick.kicad_mod" header = "" footer = "" if exists(filename): # Read existing footprint f = open(filename) footprint = f.read() f.close() # Find the end of the header headerEndIndex = footprint.find("(pad ") header = footprint[:headerEndIndex] # Find the end of the pads list lastPadIndex = headerEndIndex while (footprint.find("(pad ", lastPadIndex) > -1): lastPadIndex = footprint.find("(pad ", lastPadIndex) + 5 footerStartIndex = footprint.find("))", lastPadIndex) + 2 footer = footprint[footerStartIndex:] if header.find("TE-Connectivity") < 0: header = \ """(module iCEstick (layer F.Cu) (tedit 5BD73D6F) (fp_text reference REF** (at 0 -12.7) (layer F.SilkS) (effects (font (size 1 1) (thickness 0.15))) ) (fp_text value iCEstick (at 0 25.4) (layer F.Fab) (effects (font (size 1 1) (thickness 0.15))) ) """ footer = ")" # # Generate pads according to schematic drawing # designators_j1 = ["3V3", "GND"] + [str(n) for n in range(112,120)] designators_j2 = [ \ [str(n) for n in range(78,82)] + ["GND", "3V3"], \ ["87", "88", "90", "91", "GND", "3V3"] \ ] designators_j3 = ["3V3", "GND", "62", "61", "60", "56", "48", "47", "45", "44"] # # J1 connector pad list # pads_j1 = [] oldX = x oldY = y y -= 21.81 for i in range(10): # The first pad is a rectangle, the remaining ones are circular if (i == 0): shape = Shape.RECT else: shape = Shape.CIRCLE # Create pad object newPad = Pad( designator = designators_j1[i], through_hole = True, plated = True, shape = shape, at = (x, y), size = (padWidth, padWidth), drill = drillDiameter ) pads_j1 += [newPad] x -= 2.54 # # J2 connector pad list # pads_j2 = [] x = oldX - 5.80 newY = oldY - 21.81 + 4.49 + 5*2.54 y = newY for i in range(6): # The first pad is a rectangle, the remaining ones are circular if (i == 0): shape = Shape.RECT else: shape = Shape.CIRCLE # Create pad 
object newPad = Pad( designator = designators_j2[0][i], through_hole = True, plated = True, shape = shape, at = (x, y), size = (padWidth, padWidth), drill = drillDiameter ) pads_j2 += [newPad] y -= 2.54 # Second (inner) row of pins of J2 x -= 2.54 y = newY for i in range(6): # Create pad object newPad = Pad( designator = designators_j2[1][i], through_hole = True, plated = True, shape = Shape.CIRCLE, at = (x, y), size = (padWidth, padWidth), drill = drillDiameter ) pads_j2 += [newPad] y -= 2.54 # # J3 connector pad list # pads_j3 = [] x = oldX y = oldY for i in range(10): # The first pad is a rectangle, the remaining ones are circular if (i == 0): shape = Shape.RECT else: shape = Shape.CIRCLE # Create pad object newPad = Pad( designator = designators_j3[i], through_hole = True, plated = True, shape = shape, at = (x, y), size = (padWidth, padWidth), drill = drillDiameter ) pads_j1 += [newPad] x -= 2.54 # Make a list of all pads pads = pads_j1 + pads_j2 + pads_j3 # Compose new footprint from header, pads and footer newFootprint = header for pad in pads: newFootprint += str(pad) + "\n" newFootprint += footer.strip() # Print generated footprint to screen print(newFootprint) # Save generated footprint to file f = open(filename, "w") f.write(newFootprint) f.close()
normal
{ "blob_id": "c71e367ad320d7eadabbbfda728d94448db6441d", "index": 2109, "step-1": "<mask token>\n", "step-2": "<mask token>\nif exists(filename):\n f = open(filename)\n footprint = f.read()\n f.close()\n headerEndIndex = footprint.find('(pad ')\n header = footprint[:headerEndIndex]\n lastPadIndex = headerEndIndex\n while footprint.find('(pad ', lastPadIndex) > -1:\n lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5\n footerStartIndex = footprint.find('))', lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\nif header.find('TE-Connectivity') < 0:\n header = \"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = ')'\n<mask token>\ny -= 21.81\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j1[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\n<mask token>\nfor i in range(6):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated\n =True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\nx -= 2.54\n<mask token>\nfor i in range(6):\n newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated\n =True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),\n drill=drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\n<mask token>\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j3[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\n<mask 
token>\nfor pad in pads:\n newFootprint += str(pad) + '\\n'\nnewFootprint += footer.strip()\nprint(newFootprint)\n<mask token>\nf.write(newFootprint)\nf.close()\n", "step-3": "x = 0.0\ny = 0.0\ndrillDiameter = 1.0\npadWidth = 1.6\n<mask token>\nfilename = 'iCEstick.kicad_mod'\nheader = ''\nfooter = ''\nif exists(filename):\n f = open(filename)\n footprint = f.read()\n f.close()\n headerEndIndex = footprint.find('(pad ')\n header = footprint[:headerEndIndex]\n lastPadIndex = headerEndIndex\n while footprint.find('(pad ', lastPadIndex) > -1:\n lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5\n footerStartIndex = footprint.find('))', lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\nif header.find('TE-Connectivity') < 0:\n header = \"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = ')'\ndesignators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]\ndesignators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',\n '88', '90', '91', 'GND', '3V3']]\ndesignators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']\npads_j1 = []\noldX = x\noldY = y\ny -= 21.81\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j1[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads_j2 = []\nx = oldX - 5.8\nnewY = oldY - 21.81 + 4.49 + 5 * 2.54\ny = newY\nfor i in range(6):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated\n =True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\nx -= 2.54\ny = 
newY\nfor i in range(6):\n newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated\n =True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),\n drill=drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\npads_j3 = []\nx = oldX\ny = oldY\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j3[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads = pads_j1 + pads_j2 + pads_j3\nnewFootprint = header\nfor pad in pads:\n newFootprint += str(pad) + '\\n'\nnewFootprint += footer.strip()\nprint(newFootprint)\nf = open(filename, 'w')\nf.write(newFootprint)\nf.close()\n", "step-4": "x = 0.0\ny = 0.0\ndrillDiameter = 1.0\npadWidth = 1.6\nfrom os.path import exists\nfrom pad import *\nfilename = 'iCEstick.kicad_mod'\nheader = ''\nfooter = ''\nif exists(filename):\n f = open(filename)\n footprint = f.read()\n f.close()\n headerEndIndex = footprint.find('(pad ')\n header = footprint[:headerEndIndex]\n lastPadIndex = headerEndIndex\n while footprint.find('(pad ', lastPadIndex) > -1:\n lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5\n footerStartIndex = footprint.find('))', lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\nif header.find('TE-Connectivity') < 0:\n header = \"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = ')'\ndesignators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]\ndesignators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',\n '88', '90', '91', 'GND', '3V3']]\ndesignators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']\npads_j1 = []\noldX = x\noldY = y\ny -= 21.81\nfor i in range(10):\n 
if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j1[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads_j2 = []\nx = oldX - 5.8\nnewY = oldY - 21.81 + 4.49 + 5 * 2.54\ny = newY\nfor i in range(6):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated\n =True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\nx -= 2.54\ny = newY\nfor i in range(6):\n newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated\n =True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),\n drill=drillDiameter)\n pads_j2 += [newPad]\n y -= 2.54\npads_j3 = []\nx = oldX\ny = oldY\nfor i in range(10):\n if i == 0:\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n newPad = Pad(designator=designators_j3[i], through_hole=True, plated=\n True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=\n drillDiameter)\n pads_j1 += [newPad]\n x -= 2.54\npads = pads_j1 + pads_j2 + pads_j3\nnewFootprint = header\nfor pad in pads:\n newFootprint += str(pad) + '\\n'\nnewFootprint += footer.strip()\nprint(newFootprint)\nf = open(filename, 'w')\nf.write(newFootprint)\nf.close()\n", "step-5": "#!/usr/bin/python\n\n# Point of origin (connector J3, pad 1, net 3V3)\nx = 0.0\ny = 0.0\n\ndrillDiameter = 1.0\npadWidth = 1.6\n\n\nfrom os.path import exists\nfrom pad import *\n\nfilename=\"iCEstick.kicad_mod\"\n\nheader = \"\"\nfooter = \"\"\n\nif exists(filename):\n # Read existing footprint\n f = open(filename)\n footprint = f.read()\n f.close()\n \n # Find the end of the header\n headerEndIndex = footprint.find(\"(pad \")\n header = footprint[:headerEndIndex]\n \n # Find the end of the pads list\n lastPadIndex = headerEndIndex\n while (footprint.find(\"(pad \", lastPadIndex) > -1):\n 
lastPadIndex = footprint.find(\"(pad \", lastPadIndex) + 5\n \n footerStartIndex = footprint.find(\"))\", lastPadIndex) + 2\n footer = footprint[footerStartIndex:]\n\nif header.find(\"TE-Connectivity\") < 0:\n header = \\\n\"\"\"(module iCEstick (layer F.Cu) (tedit 5BD73D6F)\n (fp_text reference REF** (at 0 -12.7) (layer F.SilkS)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n (fp_text value iCEstick (at 0 25.4) (layer F.Fab)\n (effects (font (size 1 1) (thickness 0.15)))\n )\n\"\"\"\n footer = \")\"\n\n#\n# Generate pads according to schematic drawing\n#\n\ndesignators_j1 = [\"3V3\", \"GND\"] + [str(n) for n in range(112,120)]\n\ndesignators_j2 = [ \\\n [str(n) for n in range(78,82)] + [\"GND\", \"3V3\"], \\\n [\"87\", \"88\", \"90\", \"91\", \"GND\", \"3V3\"] \\\n ]\n\ndesignators_j3 = [\"3V3\", \"GND\", \"62\", \"61\", \"60\", \"56\", \"48\", \"47\", \"45\", \"44\"]\n\n#\n# J1 connector pad list\n#\npads_j1 = []\noldX = x\noldY = y\ny -= 21.81\nfor i in range(10):\n # The first pad is a rectangle, the remaining ones are circular\n if (i == 0):\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n \n # Create pad object\n newPad = Pad(\n designator = designators_j1[i],\n through_hole = True,\n plated = True,\n shape = shape,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j1 += [newPad]\n x -= 2.54\n\n#\n# J2 connector pad list\n#\npads_j2 = []\nx = oldX - 5.80\nnewY = oldY - 21.81 + 4.49 + 5*2.54\ny = newY\nfor i in range(6):\n # The first pad is a rectangle, the remaining ones are circular\n if (i == 0):\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n \n # Create pad object\n newPad = Pad(\n designator = designators_j2[0][i],\n through_hole = True,\n plated = True,\n shape = shape,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j2 += [newPad]\n y -= 2.54\n\n# Second (inner) row of pins of J2\nx -= 2.54\ny = newY\nfor i in range(6):\n # Create pad object\n newPad = Pad(\n designator = 
designators_j2[1][i],\n through_hole = True,\n plated = True,\n shape = Shape.CIRCLE,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j2 += [newPad]\n y -= 2.54\n\n#\n# J3 connector pad list\n#\npads_j3 = []\nx = oldX\ny = oldY\nfor i in range(10):\n # The first pad is a rectangle, the remaining ones are circular\n if (i == 0):\n shape = Shape.RECT\n else:\n shape = Shape.CIRCLE\n \n # Create pad object\n newPad = Pad(\n designator = designators_j3[i],\n through_hole = True,\n plated = True,\n shape = shape,\n at = (x, y),\n size = (padWidth, padWidth),\n drill = drillDiameter\n )\n pads_j1 += [newPad]\n x -= 2.54\n\n# Make a list of all pads\npads = pads_j1 + pads_j2 + pads_j3\n\n# Compose new footprint from header, pads and footer\nnewFootprint = header\nfor pad in pads:\n newFootprint += str(pad) + \"\\n\"\nnewFootprint += footer.strip()\n\n# Print generated footprint to screen\nprint(newFootprint)\n\n# Save generated footprint to file\nf = open(filename, \"w\")\nf.write(newFootprint)\nf.close()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import math import pygame from TestingFunctions.FunctionExample import FunctionExample class FunctionPygameCircle(FunctionExample): def __init__(self, data_len, width=500, height=500, dot_size=5): self.angle = (2 * math.pi) / (data_len) self.width = width self.height = height self.dot_size = dot_size def setup(self): pygame.init() self.screen = pygame.display.set_mode([self.width, self.height]) pygame.key.set_repeat(100, 50) self.screen.fill([0, 0, 0]) def run(self, data): return pygame.draw.circle(self.screen, [150, 0, 150], [int(self.width / 2 - math.cos(self.angle * data) * (self.width / 2 - self.dot_size)), int(self.height / 2 - math.sin(self.angle * data) * ( self.height / 2 - self.dot_size))], self.dot_size) def run_no_return(self, data): pygame.draw.circle(self.screen, [150, 0, 150], [int(self.width / 2 - math.cos(self.angle * data) * (self.width / 2 - self.dot_size)), int(self.height / 2 - math.sin(self.angle * data) * (self.height / 2 - self.dot_size))], self.dot_size) def reset(self): pygame.display.flip() self.screen.fill([0, 0, 0]) def close(self): pygame.quit()
normal
{ "blob_id": "2faf39f8d12197e20948b2bf4288b7ee406f5b86", "index": 2025, "step-1": "<mask token>\n\n\nclass FunctionPygameCircle(FunctionExample):\n\n def __init__(self, data_len, width=500, height=500, dot_size=5):\n self.angle = 2 * math.pi / data_len\n self.width = width\n self.height = height\n self.dot_size = dot_size\n <mask token>\n <mask token>\n <mask token>\n\n def reset(self):\n pygame.display.flip()\n self.screen.fill([0, 0, 0])\n\n def close(self):\n pygame.quit()\n", "step-2": "<mask token>\n\n\nclass FunctionPygameCircle(FunctionExample):\n\n def __init__(self, data_len, width=500, height=500, dot_size=5):\n self.angle = 2 * math.pi / data_len\n self.width = width\n self.height = height\n self.dot_size = dot_size\n <mask token>\n\n def run(self, data):\n return pygame.draw.circle(self.screen, [150, 0, 150], [int(self.\n width / 2 - math.cos(self.angle * data) * (self.width / 2 -\n self.dot_size)), int(self.height / 2 - math.sin(self.angle *\n data) * (self.height / 2 - self.dot_size))], self.dot_size)\n\n def run_no_return(self, data):\n pygame.draw.circle(self.screen, [150, 0, 150], [int(self.width / 2 -\n math.cos(self.angle * data) * (self.width / 2 - self.dot_size)),\n int(self.height / 2 - math.sin(self.angle * data) * (self.\n height / 2 - self.dot_size))], self.dot_size)\n\n def reset(self):\n pygame.display.flip()\n self.screen.fill([0, 0, 0])\n\n def close(self):\n pygame.quit()\n", "step-3": "<mask token>\n\n\nclass FunctionPygameCircle(FunctionExample):\n\n def __init__(self, data_len, width=500, height=500, dot_size=5):\n self.angle = 2 * math.pi / data_len\n self.width = width\n self.height = height\n self.dot_size = dot_size\n\n def setup(self):\n pygame.init()\n self.screen = pygame.display.set_mode([self.width, self.height])\n pygame.key.set_repeat(100, 50)\n self.screen.fill([0, 0, 0])\n\n def run(self, data):\n return pygame.draw.circle(self.screen, [150, 0, 150], [int(self.\n width / 2 - math.cos(self.angle * data) * (self.width / 2 
-\n self.dot_size)), int(self.height / 2 - math.sin(self.angle *\n data) * (self.height / 2 - self.dot_size))], self.dot_size)\n\n def run_no_return(self, data):\n pygame.draw.circle(self.screen, [150, 0, 150], [int(self.width / 2 -\n math.cos(self.angle * data) * (self.width / 2 - self.dot_size)),\n int(self.height / 2 - math.sin(self.angle * data) * (self.\n height / 2 - self.dot_size))], self.dot_size)\n\n def reset(self):\n pygame.display.flip()\n self.screen.fill([0, 0, 0])\n\n def close(self):\n pygame.quit()\n", "step-4": "import math\nimport pygame\nfrom TestingFunctions.FunctionExample import FunctionExample\n\n\nclass FunctionPygameCircle(FunctionExample):\n\n def __init__(self, data_len, width=500, height=500, dot_size=5):\n self.angle = 2 * math.pi / data_len\n self.width = width\n self.height = height\n self.dot_size = dot_size\n\n def setup(self):\n pygame.init()\n self.screen = pygame.display.set_mode([self.width, self.height])\n pygame.key.set_repeat(100, 50)\n self.screen.fill([0, 0, 0])\n\n def run(self, data):\n return pygame.draw.circle(self.screen, [150, 0, 150], [int(self.\n width / 2 - math.cos(self.angle * data) * (self.width / 2 -\n self.dot_size)), int(self.height / 2 - math.sin(self.angle *\n data) * (self.height / 2 - self.dot_size))], self.dot_size)\n\n def run_no_return(self, data):\n pygame.draw.circle(self.screen, [150, 0, 150], [int(self.width / 2 -\n math.cos(self.angle * data) * (self.width / 2 - self.dot_size)),\n int(self.height / 2 - math.sin(self.angle * data) * (self.\n height / 2 - self.dot_size))], self.dot_size)\n\n def reset(self):\n pygame.display.flip()\n self.screen.fill([0, 0, 0])\n\n def close(self):\n pygame.quit()\n", "step-5": "import math\nimport pygame\n\nfrom TestingFunctions.FunctionExample import FunctionExample\n\n\nclass FunctionPygameCircle(FunctionExample):\n def __init__(self, data_len, width=500, height=500, dot_size=5):\n self.angle = (2 * math.pi) / (data_len)\n self.width = width\n self.height = 
height\n self.dot_size = dot_size\n\n def setup(self):\n pygame.init()\n self.screen = pygame.display.set_mode([self.width, self.height])\n pygame.key.set_repeat(100, 50)\n self.screen.fill([0, 0, 0])\n\n def run(self, data):\n return pygame.draw.circle(self.screen, [150, 0, 150],\n [int(self.width / 2 - math.cos(self.angle * data) * (self.width / 2 - self.dot_size)),\n int(self.height / 2 - math.sin(self.angle * data) * (\n self.height / 2 - self.dot_size))],\n self.dot_size)\n\n def run_no_return(self, data):\n pygame.draw.circle(self.screen, [150, 0, 150],\n [int(self.width / 2 - math.cos(self.angle * data) * (self.width / 2 - self.dot_size)),\n int(self.height / 2 - math.sin(self.angle * data) * (self.height / 2 - self.dot_size))],\n self.dot_size)\n\n def reset(self):\n pygame.display.flip()\n self.screen.fill([0, 0, 0])\n\n def close(self):\n pygame.quit()\n", "step-ids": [ 4, 6, 7, 8, 9 ] }
[ 4, 6, 7, 8, 9 ]
import os from googleapiclient.discovery import build import httplib2 from oauth2client import gce from oauth2client.appengine import AppAssertionCredentials from oauth2client.file import Storage __author__ = 'ekampf' import json import logging import apiclient.errors from apiclient import http as apiclient_request from apiclient import model as apiclient_model from .errors import BigQueryError, BigQueryCommunicationError, BigQueryDuplicateError, \ BigQueryStreamingMaximumRowSizeExceededError, BigQueryAuthorizationError # pylint: disable=E1002 class BigQueryModel(apiclient_model.JsonModel): """Adds optional global parameters to all requests.""" def __init__(self, trace=None, **kwargs): super(BigQueryModel, self).__init__(**kwargs) self.trace = trace def request(self, headers, path_params, query_params, body_value): """Updates outgoing request.""" if 'trace' not in query_params and self.trace: query_params['trace'] = self.trace return super(BigQueryModel, self).request(headers, path_params, query_params, body_value) # pylint: disable=E1002 class BigQueryHttp(apiclient_request.HttpRequest): """Converts errors into BigQuery errors.""" def __init__(self, http_model, *args, **kwargs): super(BigQueryHttp, self).__init__(*args, **kwargs) self._model = http_model @staticmethod def factory(bigquery_model): """Returns a function that creates a BigQueryHttp with the given model.""" def _create_bigquery_http_request(*args, **kwargs): captured_model = bigquery_model return BigQueryHttp(captured_model, *args, **kwargs) return _create_bigquery_http_request def execute(self, **kwargs): try: return super(BigQueryHttp, self).execute(**kwargs) except apiclient.errors.HttpError, e: # TODO(user): Remove this when apiclient supports logging of error responses. 
self._model._log_response(e.resp, e.content) if e.resp.get('content-type', '').startswith('application/json'): result = json.loads(e.content) error = result.get('error', {}).get('errors', [{}])[0] raise BigQueryError.create(error, result, []) else: raise BigQueryCommunicationError( ('Could not connect with BigQuery server.\n' 'Http response status: %s\n' 'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content)) class BigQueryClient(object): def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None): """ :param trace: A value to add to all outgoing requests :return: """ super(BigQueryClient, self).__init__() self.trace = trace self.use_jwt_credentials_auth = use_jwt_credentials_auth self.jwt_account_name = jwt_account_name self.jwt_key_func = jwt_key_func self.oauth_credentails_file = oauth_credentails_file ###### Wrapping BigQuery's API def datasets(self): return self.api_client.datasets() def jobs(self): return self.api_client.jobs() def projects(self): return self.api_client.projects() def tabledata(self): return self.api_client.tabledata() def tables(self): return self.api_client.tables() def get_http_for_request(self): if self.use_jwt_credentials_auth: # Local debugging using pem file scope = 'https://www.googleapis.com/auth/bigquery' from oauth2client.client import SignedJwtAssertionCredentials credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope) logging.info("Using Standard jwt authentication") return credentials.authorize(httplib2.Http()) elif self.is_in_appengine(): # App engine from google.appengine.api import memcache scope = 'https://www.googleapis.com/auth/bigquery' credentials = AppAssertionCredentials(scope=scope) logging.info("Using Standard appengine authentication") return credentials.authorize(httplib2.Http(memcache)) elif self.oauth_credentails_file: # Local oauth token http = httplib2.Http() storage = 
Storage(self.oauth_credentails_file) credentials = storage.get() if not credentials: raise EnvironmentError('No credential file present') http = credentials.authorize(http) credentials.refresh(http) logging.info("Using Standard OAuth authentication") return http elif self.is_in_gce_machine(): # GCE authorization http = httplib2.Http() credentials = gce.AppAssertionCredentials('') http = credentials.authorize(http) credentials.refresh(http) logging.info("Using GCE authentication") return http raise BigQueryAuthorizationError() @staticmethod def is_in_appengine(): 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/') @staticmethod def is_in_gce_machine(): try: metadata_uri = 'http://metadata.google.internal' http = httplib2.Http() http.request(metadata_uri, method='GET') return True except httplib2.ServerNotFoundError: return False @property def api_client(self): bigquery_model = BigQueryModel(trace=self.trace) bigquery_http = BigQueryHttp.factory(bigquery_model) http = self.get_http_for_request() return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http) ###### Utility methods # tables() methods def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False, description=None, friendly_name=None, expiration=None): logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id) body = { 'tableReference': { 'tableId': table_id, 'datasetId': dataset_id, 'projectId': project_id }, 'schema': { 'fields': fields } } if friendly_name is not None: body['friendlyName'] = friendly_name if description is not None: body['description'] = description if expiration is not None: body['expirationTime'] = expiration try: logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body) response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute() logging.info('%s create table 
response %s', project_id, response) return response except BigQueryDuplicateError: if not ignore_existing: raise # tabledata() methods def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False): """Streams data into BigQuery one record at a time without needing to run a load job. :param application_id: Project ID of the destination table. (required) :param dataset_id: Dataset ID of the destination table. (required) :param table_id: Table ID of the destination table. (required) :param insert_id_generator: lambda that gets a row and generates an insertId. :param rows: The rows to insert (array or single object) :param ignore_invalid_rows: If True performs 2 inserts passes. On first pass, if there's an error google return "invalid" on error rows but doesnt insert anything (rest of the rows marked as "stopped"). So we filter out "invalid" rows and do a 2nd pass. Note that this does not ignore if there's a BigQueryStreamingMaximumRowSizeExceeded error. :return: A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll). If ignore_invalid_rows is True and there were error return object is a dict containing the response object for the 2 insert passes performed: dict(response_pass1=..., response_pass2=...) 
""" if isinstance(rows, dict): rows = [rows] if insert_id_generator is not None: rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows] else: rows_json = [{'json': r} for r in rows] body = {"rows": rows_json} try: logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id) response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute() if 'insertErrors' in response: insert_errors = response['insertErrors'] insert_errors_json = json.dumps(insert_errors) if insert_errors_json.find('Maximum allowed row size exceeded') > -1: raise BigQueryStreamingMaximumRowSizeExceededError() logging.error("Failed to insert rows:\n%s", insert_errors_json) if ignore_invalid_rows: invalid_indices = [err['index'] for err in insert_errors if any([x['reason'] == 'invalid' for x in err['errors']])] rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices] body_pass2 = {"rows": rows_json_pass2} response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute() return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2))) logging.info("Successfully inserted %s rows", len(rows)) return response except BigQueryError as ex: logging.exception(ex.message) raise # jobs() methods def create_insert_job(self, project_id, dataset_id, table_id, gcs_links): job_data = { 'projectId': project_id, 'configuration': { 'load': { 'sourceFormat': 'NEWLINE_DELIMITED_JSON', 'writeDisposition': 'WRITE_APPEND', 'sourceUris': ['gs:/%s' % s for s in gcs_links], 'destinationTable': { 'projectId': project_id, 'datasetId': dataset_id, 'tableId': table_id }, } } } logging.info('about to insert job:%s', job_data) try: job = self.api_client.jobs().insert(projectId=project_id, 
body=job_data).execute() status = job['status'] if 'errorResult' in status: raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], job['jobReference']) return job except BigQueryError as ex: logging.exception(ex) raise def monitor_insert_job(self, project_id, job_id): try: logging.info('about to monitor job: %s', job_id) job = self.api_client.jobs().get(project_id, job_id) logging.info('Got job response: %s', job) state = job['status']['state'] if state == 'DONE': logging.info("Job %s is done loading!", job_id) if 'errorResult' in job['status']: raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], {'projectId': project_id, 'jobId': job_id}) except BigQueryError as ex: logging.exception(ex) raise def get_query_results(self, project_id, job_id, timeoutMs=None, pageToken=None, maxResults=None, startIndex=None): """Retrieves the results of a query job. :param project_id: Project ID of the query job. :param job_id: Job ID of the query job. :param timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error. :param pageToken: string, Page token, returned by a previous call, to request the next page of results :param maxResults: integer, Maximum number of results to read :param startIndex: string, Zero-based index of the starting row :return: """ try: return self.api_client.jobs().getQueryResults(project_id, job_id, timeoutMs, pageToken, maxResults, startIndex) except BigQueryError as ex: logging.exception(ex) raise
normal
{ "blob_id": "10d5eef304a3d293441169ebde1f7859537c4b6e", "index": 9847, "step-1": "import os\nfrom googleapiclient.discovery import build\nimport httplib2\nfrom oauth2client import gce\nfrom oauth2client.appengine import AppAssertionCredentials\nfrom oauth2client.file import Storage\n\n__author__ = 'ekampf'\n\n\nimport json\nimport logging\n\nimport apiclient.errors\nfrom apiclient import http as apiclient_request\nfrom apiclient import model as apiclient_model\nfrom .errors import BigQueryError, BigQueryCommunicationError, BigQueryDuplicateError, \\\n BigQueryStreamingMaximumRowSizeExceededError, BigQueryAuthorizationError\n\n\n# pylint: disable=E1002\nclass BigQueryModel(apiclient_model.JsonModel):\n \"\"\"Adds optional global parameters to all requests.\"\"\"\n\n def __init__(self, trace=None, **kwargs):\n super(BigQueryModel, self).__init__(**kwargs)\n self.trace = trace\n\n def request(self, headers, path_params, query_params, body_value):\n \"\"\"Updates outgoing request.\"\"\"\n if 'trace' not in query_params and self.trace:\n query_params['trace'] = self.trace\n\n return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)\n\n\n# pylint: disable=E1002\nclass BigQueryHttp(apiclient_request.HttpRequest):\n \"\"\"Converts errors into BigQuery errors.\"\"\"\n\n def __init__(self, http_model, *args, **kwargs):\n super(BigQueryHttp, self).__init__(*args, **kwargs)\n self._model = http_model\n\n @staticmethod\n def factory(bigquery_model):\n \"\"\"Returns a function that creates a BigQueryHttp with the given model.\"\"\"\n def _create_bigquery_http_request(*args, **kwargs):\n captured_model = bigquery_model\n return BigQueryHttp(captured_model, *args, **kwargs)\n\n return _create_bigquery_http_request\n\n\n def execute(self, **kwargs):\n try:\n return super(BigQueryHttp, self).execute(**kwargs)\n except apiclient.errors.HttpError, e:\n # TODO(user): Remove this when apiclient supports logging of error responses.\n 
self._model._log_response(e.resp, e.content)\n\n if e.resp.get('content-type', '').startswith('application/json'):\n result = json.loads(e.content)\n error = result.get('error', {}).get('errors', [{}])[0]\n raise BigQueryError.create(error, result, [])\n else:\n raise BigQueryCommunicationError(\n ('Could not connect with BigQuery server.\\n'\n 'Http response status: %s\\n'\n 'Http response content:\\n%s') % (e.resp.get('status', '(unexpected)'), e.content))\n\n\nclass BigQueryClient(object):\n def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):\n \"\"\"\n :param trace: A value to add to all outgoing requests\n :return:\n \"\"\"\n super(BigQueryClient, self).__init__()\n self.trace = trace\n self.use_jwt_credentials_auth = use_jwt_credentials_auth\n self.jwt_account_name = jwt_account_name\n self.jwt_key_func = jwt_key_func\n self.oauth_credentails_file = oauth_credentails_file\n\n ###### Wrapping BigQuery's API\n\n def datasets(self):\n return self.api_client.datasets()\n\n def jobs(self):\n return self.api_client.jobs()\n\n def projects(self):\n return self.api_client.projects()\n\n def tabledata(self):\n return self.api_client.tabledata()\n\n def tables(self):\n return self.api_client.tables()\n\n def get_http_for_request(self):\n if self.use_jwt_credentials_auth: # Local debugging using pem file\n scope = 'https://www.googleapis.com/auth/bigquery'\n from oauth2client.client import SignedJwtAssertionCredentials\n credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)\n logging.info(\"Using Standard jwt authentication\")\n return credentials.authorize(httplib2.Http())\n\n elif self.is_in_appengine(): # App engine\n from google.appengine.api import memcache\n scope = 'https://www.googleapis.com/auth/bigquery'\n credentials = AppAssertionCredentials(scope=scope)\n logging.info(\"Using Standard appengine authentication\")\n return 
credentials.authorize(httplib2.Http(memcache))\n\n elif self.oauth_credentails_file: # Local oauth token\n http = httplib2.Http()\n storage = Storage(self.oauth_credentails_file)\n credentials = storage.get()\n if not credentials:\n raise EnvironmentError('No credential file present')\n http = credentials.authorize(http)\n credentials.refresh(http)\n logging.info(\"Using Standard OAuth authentication\")\n return http\n\n elif self.is_in_gce_machine(): # GCE authorization\n http = httplib2.Http()\n credentials = gce.AppAssertionCredentials('')\n http = credentials.authorize(http)\n credentials.refresh(http)\n logging.info(\"Using GCE authentication\")\n return http\n\n raise BigQueryAuthorizationError()\n\n @staticmethod\n def is_in_appengine():\n 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')\n\n @staticmethod\n def is_in_gce_machine():\n try:\n metadata_uri = 'http://metadata.google.internal'\n http = httplib2.Http()\n http.request(metadata_uri, method='GET')\n return True\n except httplib2.ServerNotFoundError:\n return False\n\n\n @property\n def api_client(self):\n bigquery_model = BigQueryModel(trace=self.trace)\n bigquery_http = BigQueryHttp.factory(bigquery_model)\n\n http = self.get_http_for_request()\n return build(\"bigquery\", \"v2\", http=http, model=bigquery_model, requestBuilder=bigquery_http)\n\n\n ###### Utility methods\n\n # tables() methods\n\n def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,\n description=None, friendly_name=None, expiration=None):\n logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)\n\n body = {\n 'tableReference': {\n 'tableId': table_id,\n 'datasetId': dataset_id,\n 'projectId': project_id\n },\n 'schema': {\n 'fields': fields\n }\n }\n\n if friendly_name is not None:\n body['friendlyName'] = friendly_name\n if description is not None:\n body['description'] = description\n if expiration is not 
None:\n body['expirationTime'] = expiration\n\n try:\n logging.info('Creating table \\ndatasetId:%s \\nprojectId: %s \\ntable_ref:%s', dataset_id, project_id, body)\n\n response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()\n\n logging.info('%s create table response %s', project_id, response)\n\n return response\n except BigQueryDuplicateError:\n if not ignore_existing:\n raise\n\n # tabledata() methods\n\n def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):\n \"\"\"Streams data into BigQuery one record at a time without needing to run a load job.\n\n :param application_id: Project ID of the destination table. (required)\n :param dataset_id: Dataset ID of the destination table. (required)\n :param table_id: Table ID of the destination table. (required)\n :param insert_id_generator: lambda that gets a row and generates an insertId.\n :param rows: The rows to insert (array or single object)\n :param ignore_invalid_rows: If True performs 2 inserts passes. 
On first pass, if there's an error google return \"invalid\" on error rows but doesnt insert anything (rest of the rows marked as \"stopped\").\n So we filter out \"invalid\" rows and do a 2nd pass.\n Note that this does not ignore if there's a BigQueryStreamingMaximumRowSizeExceeded error.\n :return:\n A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).\n If ignore_invalid_rows is True and there were error return object is a dict containing the response object for the 2 insert passes performed: dict(response_pass1=..., response_pass2=...)\n \"\"\"\n if isinstance(rows, dict):\n rows = [rows]\n\n if insert_id_generator is not None:\n rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]\n else:\n rows_json = [{'json': r} for r in rows]\n\n body = {\"rows\": rows_json}\n\n try:\n logging.info(\"Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s\", len(rows), project_id, dataset_id, table_id)\n\n response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()\n\n if 'insertErrors' in response:\n insert_errors = response['insertErrors']\n insert_errors_json = json.dumps(insert_errors)\n if insert_errors_json.find('Maximum allowed row size exceeded') > -1:\n raise BigQueryStreamingMaximumRowSizeExceededError()\n\n logging.error(\"Failed to insert rows:\\n%s\", insert_errors_json)\n if ignore_invalid_rows:\n invalid_indices = [err['index'] for err in insert_errors\n if any([x['reason'] == 'invalid' for x in err['errors']])]\n\n rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]\n\n body_pass2 = {\"rows\": rows_json_pass2}\n response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()\n\n return dict(response_pass1=response, response_pass2=response2, 
counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))\n\n logging.info(\"Successfully inserted %s rows\", len(rows))\n return response\n except BigQueryError as ex:\n logging.exception(ex.message)\n raise\n\n # jobs() methods\n\n def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):\n job_data = {\n 'projectId': project_id,\n 'configuration': {\n 'load': {\n 'sourceFormat': 'NEWLINE_DELIMITED_JSON',\n 'writeDisposition': 'WRITE_APPEND',\n 'sourceUris': ['gs:/%s' % s for s in gcs_links],\n 'destinationTable': {\n 'projectId': project_id,\n 'datasetId': dataset_id,\n 'tableId': table_id\n },\n }\n }\n }\n\n logging.info('about to insert job:%s', job_data)\n try:\n job = self.api_client.jobs().insert(projectId=project_id, body=job_data).execute()\n\n status = job['status']\n if 'errorResult' in status:\n raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], job['jobReference'])\n\n return job\n except BigQueryError as ex:\n logging.exception(ex)\n raise\n\n def monitor_insert_job(self, project_id, job_id):\n try:\n logging.info('about to monitor job: %s', job_id)\n job = self.api_client.jobs().get(project_id, job_id)\n logging.info('Got job response: %s', job)\n\n\n state = job['status']['state']\n if state == 'DONE':\n logging.info(\"Job %s is done loading!\", job_id)\n if 'errorResult' in job['status']:\n raise BigQueryError.create(job['status']['errorResult'], None, job['status']['errors'], {'projectId': project_id, 'jobId': job_id})\n\n except BigQueryError as ex:\n logging.exception(ex)\n raise\n\n def get_query_results(self, project_id, job_id, timeoutMs=None, pageToken=None, maxResults=None, startIndex=None):\n \"\"\"Retrieves the results of a query job.\n :param project_id: Project ID of the query job.\n :param job_id: Job ID of the query job.\n :param timeoutMs: integer, How long to wait for the query to complete, in milliseconds, before returning. 
Default is to return immediately. If the timeout passes before the job completes, the request will fail with a TIMEOUT error.\n :param pageToken: string, Page token, returned by a previous call, to request the next page of results\n :param maxResults: integer, Maximum number of results to read\n :param startIndex: string, Zero-based index of the starting row\n :return:\n \"\"\"\n\n try:\n return self.api_client.jobs().getQueryResults(project_id, job_id, timeoutMs, pageToken, maxResults, startIndex)\n except BigQueryError as ex:\n logging.exception(ex)\n raise\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# Imports import os import time import math import random from lib import * def MT19937_keystream_generator(seed: int) -> bytes: """ Generate keystream for MT19937 """ # Verify that the seed is atmost 16 bit long. assert math.log2(seed) <= 16 prng = MT19937(seed) while True: number = prng.extract_number() yield from number.to_bytes(4, "big") def MT19937_CTR(string: str, seed: int) -> bytes: """ Encrypts a plaintext with MT19937 CTR Mode. """ # Verify that the seed is an integer. assert isinstance(seed, int) keystream = MT19937_keystream_generator(seed) if len(string) == 0: return b"" else: return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)]) def main(): plaintext = "Hello World!" # append random characters before plainttext string = b"" for _ in range(random.randint(0, 10)): i = random.randint(33, 126) string += chr(i).encode() string += plaintext.encode() seed = random.randint(1, 2**16) print("> Seed value coded to be", seed) cipher_bytes = MT19937_CTR(string, seed) deciphered_bytes = MT19937_CTR(cipher_bytes, seed) # verify if it can be decrypted assert string == deciphered_bytes #The number of possible keys is super small so you can just try them all. They even insist on it in the instructions: the cipher is using a 16-bits seed. It's kind of weird actually because from the specifications of MT19937 the seed seems to be 32 bits. Well even 32 bits should be small enough to crack, it would just take longer. for seed in range(1, 2**16): deciphered_bytes = MT19937_CTR(cipher_bytes, seed) try: assert string == deciphered_bytes print("> Brute force successful.\nSeed:", seed) break except AssertionError: continue return if __name__=="__main__": main()
normal
{ "blob_id": "66b7d928bc2c98a12f7adb8a375ced21edce8333", "index": 8492, "step-1": "<mask token>\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef MT19937_keystream_generator(seed: int) ->bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n assert math.log2(seed) <= 16\n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, 'big')\n\n\ndef MT19937_CTR(string: str, seed: int) ->bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n assert isinstance(seed, int)\n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b''\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\n<mask token>\n", "step-3": 
"<mask token>\n\n\ndef MT19937_keystream_generator(seed: int) ->bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n assert math.log2(seed) <= 16\n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, 'big')\n\n\ndef MT19937_CTR(string: str, seed: int) ->bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n assert isinstance(seed, int)\n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b''\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import os\nimport time\nimport math\nimport random\nfrom lib import *\n\n\ndef MT19937_keystream_generator(seed: int) ->bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n assert math.log2(seed) <= 16\n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, 'big')\n\n\ndef MT19937_CTR(string: str, seed: int) ->bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n assert isinstance(seed, int)\n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b''\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n\n\ndef main():\n plaintext = 'Hello World!'\n string = b''\n for _ in range(random.randint(0, 10)):\n i = 
random.randint(33, 126)\n string += chr(i).encode()\n string += plaintext.encode()\n seed = random.randint(1, 2 ** 16)\n print('> Seed value coded to be', seed)\n cipher_bytes = MT19937_CTR(string, seed)\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n assert string == deciphered_bytes\n for seed in range(1, 2 ** 16):\n deciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n try:\n assert string == deciphered_bytes\n print('> Brute force successful.\\nSeed:', seed)\n break\n except AssertionError:\n continue\n return\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "# Imports\nimport os\nimport time\nimport math\nimport random\nfrom lib import *\n\ndef MT19937_keystream_generator(seed: int) -> bytes:\n \"\"\"\n Generate keystream for MT19937\n \"\"\"\n # Verify that the seed is atmost 16 bit long.\n assert math.log2(seed) <= 16\n \n prng = MT19937(seed)\n while True:\n number = prng.extract_number()\n yield from number.to_bytes(4, \"big\")\n \ndef MT19937_CTR(string: str, seed: int) -> bytes:\n \"\"\"\n Encrypts a plaintext with MT19937 CTR Mode.\n \"\"\"\n # Verify that the seed is an integer.\n assert isinstance(seed, int)\n \n keystream = MT19937_keystream_generator(seed)\n if len(string) == 0:\n return b\"\"\n else:\n return bytes([(b1 ^ b2) for b1, b2 in zip(string, keystream)])\n \ndef main():\n\n\tplaintext = \"Hello World!\"\n\n\t# append random characters before plainttext\n\tstring = b\"\"\n\tfor _ in range(random.randint(0, 10)):\n\t\ti = random.randint(33, 126)\n\t\tstring += chr(i).encode()\n\tstring += plaintext.encode()\n\n\tseed = random.randint(1, 2**16)\n\tprint(\"> Seed value coded to be\", seed)\n\tcipher_bytes = MT19937_CTR(string, seed)\n\tdeciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n\n\t# verify if it can be decrypted\n\tassert string == deciphered_bytes\n\n\t#The number of possible keys is super small so you can just try them all. They even insist on it in the instructions: the cipher is using a 16-bits seed. 
It's kind of weird actually because from the specifications of MT19937 the seed seems to be 32 bits. Well even 32 bits should be small enough to crack, it would just take longer.\n\tfor seed in range(1, 2**16):\n\t\tdeciphered_bytes = MT19937_CTR(cipher_bytes, seed)\n\t\ttry:\n\t\t assert string == deciphered_bytes\n\t\t print(\"> Brute force successful.\\nSeed:\", seed)\n\t\t break\n\t\texcept AssertionError:\n\t\t continue\n\t\t \n\treturn\n\t\nif __name__==\"__main__\":\n\tmain()\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
import torch.nn as nn from layers import maskAConv, MaskBConvBlock class PixelCNN(nn.Module): def __init__(self, n_channel=3, h=128, discrete_channel=256): """PixelCNN Model""" super(PixelCNN, self).__init__() self.discrete_channel = discrete_channel self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3) MaskBConv = [] for i in range(15): MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1)) self.MaskBConv = nn.Sequential(*MaskBConv) # 1x1 conv to 3x256 channels self.out = nn.Sequential( nn.ReLU(), nn.Conv2d(2 * h, 1024, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(1024), nn.ReLU(), nn.Conv2d(1024, n_channel * discrete_channel, kernel_size=1, stride=1, padding=0)) def forward(self, x): """ Args: x: [batch_size, channel, height, width] Return: out [batch_size, channel, height, width, 256] """ batch_size, c_in, height, width = x.size() # [batch_size, 2h, 32, 32] x = self.MaskAConv(x) # [batch_size, 2h, 32, 32] x = self.MaskBConv(x) # [batch_size, 3x256, 32, 32] x = self.out(x) # [batch_size, 3, 256, 32, 32] x = x.view(batch_size, c_in, self.discrete_channel, height, width) # [batch_size, 3, 32, 32, 256] x = x.permute(0, 1, 3, 4, 2) return x
normal
{ "blob_id": "3185b6b1902099caed66ce6f97cd1b9940261fc1", "index": 7533, "step-1": "<mask token>\n\n\nclass PixelCNN(nn.Module):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass PixelCNN(nn.Module):\n <mask token>\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n x = self.MaskAConv(x)\n x = self.MaskBConv(x)\n x = self.out(x)\n x = x.view(batch_size, c_in, self.discrete_channel, height, width)\n x = x.permute(0, 1, 3, 4, 2)\n return x\n", "step-3": "<mask token>\n\n\nclass PixelCNN(nn.Module):\n\n def __init__(self, n_channel=3, h=128, discrete_channel=256):\n \"\"\"PixelCNN Model\"\"\"\n super(PixelCNN, self).__init__()\n self.discrete_channel = discrete_channel\n self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)\n MaskBConv = []\n for i in range(15):\n MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))\n self.MaskBConv = nn.Sequential(*MaskBConv)\n self.out = nn.Sequential(nn.ReLU(), nn.Conv2d(2 * h, 1024,\n kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(1024), nn.\n ReLU(), nn.Conv2d(1024, n_channel * discrete_channel,\n kernel_size=1, stride=1, padding=0))\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n x = self.MaskAConv(x)\n x = self.MaskBConv(x)\n x = self.out(x)\n x = x.view(batch_size, c_in, self.discrete_channel, height, width)\n x = x.permute(0, 1, 3, 4, 2)\n return x\n", "step-4": "import torch.nn as nn\nfrom layers import maskAConv, MaskBConvBlock\n\n\nclass PixelCNN(nn.Module):\n\n def __init__(self, n_channel=3, h=128, discrete_channel=256):\n \"\"\"PixelCNN Model\"\"\"\n super(PixelCNN, self).__init__()\n self.discrete_channel = discrete_channel\n self.MaskAConv = maskAConv(n_channel, 2 * h, 
k_size=7, stride=1, pad=3)\n MaskBConv = []\n for i in range(15):\n MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))\n self.MaskBConv = nn.Sequential(*MaskBConv)\n self.out = nn.Sequential(nn.ReLU(), nn.Conv2d(2 * h, 1024,\n kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(1024), nn.\n ReLU(), nn.Conv2d(1024, n_channel * discrete_channel,\n kernel_size=1, stride=1, padding=0))\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n x = self.MaskAConv(x)\n x = self.MaskBConv(x)\n x = self.out(x)\n x = x.view(batch_size, c_in, self.discrete_channel, height, width)\n x = x.permute(0, 1, 3, 4, 2)\n return x\n", "step-5": "import torch.nn as nn\nfrom layers import maskAConv, MaskBConvBlock\n\n\nclass PixelCNN(nn.Module):\n def __init__(self, n_channel=3, h=128, discrete_channel=256):\n \"\"\"PixelCNN Model\"\"\"\n super(PixelCNN, self).__init__()\n\n self.discrete_channel = discrete_channel\n\n self.MaskAConv = maskAConv(n_channel, 2 * h, k_size=7, stride=1, pad=3)\n MaskBConv = []\n for i in range(15):\n MaskBConv.append(MaskBConvBlock(h, k_size=3, stride=1, pad=1))\n self.MaskBConv = nn.Sequential(*MaskBConv)\n\n # 1x1 conv to 3x256 channels\n self.out = nn.Sequential(\n nn.ReLU(),\n nn.Conv2d(2 * h, 1024, kernel_size=1, stride=1, padding=0),\n nn.BatchNorm2d(1024),\n nn.ReLU(),\n nn.Conv2d(1024, n_channel * discrete_channel, kernel_size=1, stride=1, padding=0))\n\n def forward(self, x):\n \"\"\"\n Args:\n x: [batch_size, channel, height, width]\n Return:\n out [batch_size, channel, height, width, 256]\n \"\"\"\n batch_size, c_in, height, width = x.size()\n\n # [batch_size, 2h, 32, 32]\n x = self.MaskAConv(x)\n\n # [batch_size, 2h, 32, 32]\n x = self.MaskBConv(x)\n\n # [batch_size, 3x256, 32, 32]\n x = self.out(x)\n\n # [batch_size, 3, 256, 32, 32]\n x = x.view(batch_size, c_in, self.discrete_channel, height, 
width)\n\n # [batch_size, 3, 32, 32, 256]\n x = x.permute(0, 1, 3, 4, 2)\n\n return x\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from django import forms from . import models from .validators import validate_metadata class ServiceProviderForm(forms.ModelForm): xml = forms.CharField(label='SAML Metadata XML', widget=forms.Textarea, validators=[validate_metadata]) class Meta: model = models.ServiceProvider fields = ('xml',)
normal
{ "blob_id": "e018d28cbacb568596eb9a5134581db960111e14", "index": 9835, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ServiceProviderForm(forms.ModelForm):\n <mask token>\n\n\n class Meta:\n model = models.ServiceProvider\n fields = 'xml',\n", "step-3": "<mask token>\n\n\nclass ServiceProviderForm(forms.ModelForm):\n xml = forms.CharField(label='SAML Metadata XML', widget=forms.Textarea,\n validators=[validate_metadata])\n\n\n class Meta:\n model = models.ServiceProvider\n fields = 'xml',\n", "step-4": "from django import forms\nfrom . import models\nfrom .validators import validate_metadata\n\n\nclass ServiceProviderForm(forms.ModelForm):\n xml = forms.CharField(label='SAML Metadata XML', widget=forms.Textarea,\n validators=[validate_metadata])\n\n\n class Meta:\n model = models.ServiceProvider\n fields = 'xml',\n", "step-5": "from django import forms\n\nfrom . import models\nfrom .validators import validate_metadata\n\n\nclass ServiceProviderForm(forms.ModelForm):\n xml = forms.CharField(label='SAML Metadata XML',\n widget=forms.Textarea,\n validators=[validate_metadata])\n\n class Meta:\n model = models.ServiceProvider\n fields = ('xml',)", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
"""Run golden output tests. The golden tests are a convenient way to make sure that a "small" change does not break anyone else. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import namedtuple import os import subprocess import sys GOLDEN_CASES_DIR = 'src/googleapis/codegen/testdata/golden' GOLDEN_DISCOVERY_DIR = 'src/googleapis/codegen/testdata/golden_discovery' VERBOSE = False Test = namedtuple('Test', [ 'language', 'variant', 'input', 'options', 'golden_file']) def FindTests(): """Finds golden files and returns Test cases for each.""" for root, _, files in os.walk(GOLDEN_CASES_DIR): path_parts = root.split('/') if path_parts[-3] == 'golden': language = path_parts[-2] variant = path_parts[-1] for golden_file in files: input, _ = golden_file.split('.') options = None if input.endswith('_monolithic'): input = input[0:-11] options = ['--monolithic_source_name=sink'] # pure hackery yield Test( language = language, variant = variant, input = input, options = options, golden_file = os.path.join(root, golden_file)) def Generate(language, variant, input, options, out_file): cmd = [ 'python', 'src/googleapis/codegen/generate_library.py', '--input=%s/%s.json' % (GOLDEN_DISCOVERY_DIR, input), '--language=%s' % language, '--language_variant=%s' % variant, '--output_format=txt', '--output_file=%s' % out_file, ] if options: cmd.extend(options) try: if VERBOSE: print('generate cmd: %s' % ' '.join(cmd)) subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr) except subprocess.CalledProcessError as e: msg = '(%s, %s, %s, %s)' % (language, variant, input, options) print('FAIL: generate(%s), cmd=[%s]' % (msg, ' '.join(cmd))) return False return True def RunTest(test): # Fix this out_file = '/tmp/%s.new' % test.golden_file.split('/')[-1] if Generate(test.language, test.variant, test.input, test.options, out_file): cmd = ['diff', '--brief', test.golden_file, out_file] try: 
subprocess.check_call(cmd, stderr=sys.stderr) print('PASS: %s, %s, %s, %s' % (test.language, test.variant, test.input, test.options)) except subprocess.CalledProcessError as e: print('FAIL: %s' % str(test)) def main(argv): if len(argv) > 1: raise app.UsageError('Too many command-line arguments.') src_path = os.path.join(os.getcwd(), 'src') python_path = os.environ.get('PYTHONPATH') if python_path: os.environ['PYTHONPATH'] = '%s:%s' % (src_path, python_path) else: os.environ['PYTHONPATH'] = src_path for test in FindTests(): RunTest(test) if __name__ == '__main__': main(sys.argv)
normal
{ "blob_id": "2294951af6ad7a5e752285194d0586c79c49ef87", "index": 4254, "step-1": "<mask token>\n\n\ndef FindTests():\n \"\"\"Finds golden files and returns Test cases for each.\"\"\"\n for root, _, files in os.walk(GOLDEN_CASES_DIR):\n path_parts = root.split('/')\n if path_parts[-3] == 'golden':\n language = path_parts[-2]\n variant = path_parts[-1]\n for golden_file in files:\n input, _ = golden_file.split('.')\n options = None\n if input.endswith('_monolithic'):\n input = input[0:-11]\n options = ['--monolithic_source_name=sink']\n yield Test(language=language, variant=variant, input=input,\n options=options, golden_file=os.path.join(root,\n golden_file))\n\n\ndef Generate(language, variant, input, options, out_file):\n cmd = ['python', 'src/googleapis/codegen/generate_library.py', \n '--input=%s/%s.json' % (GOLDEN_DISCOVERY_DIR, input), \n '--language=%s' % language, '--language_variant=%s' % variant,\n '--output_format=txt', '--output_file=%s' % out_file]\n if options:\n cmd.extend(options)\n try:\n if VERBOSE:\n print('generate cmd: %s' % ' '.join(cmd))\n subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)\n except subprocess.CalledProcessError as e:\n msg = '(%s, %s, %s, %s)' % (language, variant, input, options)\n print('FAIL: generate(%s), cmd=[%s]' % (msg, ' '.join(cmd)))\n return False\n return True\n\n\ndef RunTest(test):\n out_file = '/tmp/%s.new' % test.golden_file.split('/')[-1]\n if Generate(test.language, test.variant, test.input, test.options, out_file\n ):\n cmd = ['diff', '--brief', test.golden_file, out_file]\n try:\n subprocess.check_call(cmd, stderr=sys.stderr)\n print('PASS: %s, %s, %s, %s' % (test.language, test.variant,\n test.input, test.options))\n except subprocess.CalledProcessError as e:\n print('FAIL: %s' % str(test))\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n src_path = os.path.join(os.getcwd(), 'src')\n python_path = os.environ.get('PYTHONPATH')\n if 
python_path:\n os.environ['PYTHONPATH'] = '%s:%s' % (src_path, python_path)\n else:\n os.environ['PYTHONPATH'] = src_path\n for test in FindTests():\n RunTest(test)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef FindTests():\n \"\"\"Finds golden files and returns Test cases for each.\"\"\"\n for root, _, files in os.walk(GOLDEN_CASES_DIR):\n path_parts = root.split('/')\n if path_parts[-3] == 'golden':\n language = path_parts[-2]\n variant = path_parts[-1]\n for golden_file in files:\n input, _ = golden_file.split('.')\n options = None\n if input.endswith('_monolithic'):\n input = input[0:-11]\n options = ['--monolithic_source_name=sink']\n yield Test(language=language, variant=variant, input=input,\n options=options, golden_file=os.path.join(root,\n golden_file))\n\n\ndef Generate(language, variant, input, options, out_file):\n cmd = ['python', 'src/googleapis/codegen/generate_library.py', \n '--input=%s/%s.json' % (GOLDEN_DISCOVERY_DIR, input), \n '--language=%s' % language, '--language_variant=%s' % variant,\n '--output_format=txt', '--output_file=%s' % out_file]\n if options:\n cmd.extend(options)\n try:\n if VERBOSE:\n print('generate cmd: %s' % ' '.join(cmd))\n subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)\n except subprocess.CalledProcessError as e:\n msg = '(%s, %s, %s, %s)' % (language, variant, input, options)\n print('FAIL: generate(%s), cmd=[%s]' % (msg, ' '.join(cmd)))\n return False\n return True\n\n\ndef RunTest(test):\n out_file = '/tmp/%s.new' % test.golden_file.split('/')[-1]\n if Generate(test.language, test.variant, test.input, test.options, out_file\n ):\n cmd = ['diff', '--brief', test.golden_file, out_file]\n try:\n subprocess.check_call(cmd, stderr=sys.stderr)\n print('PASS: %s, %s, %s, %s' % (test.language, test.variant,\n test.input, test.options))\n except subprocess.CalledProcessError as e:\n print('FAIL: %s' % str(test))\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line 
arguments.')\n src_path = os.path.join(os.getcwd(), 'src')\n python_path = os.environ.get('PYTHONPATH')\n if python_path:\n os.environ['PYTHONPATH'] = '%s:%s' % (src_path, python_path)\n else:\n os.environ['PYTHONPATH'] = src_path\n for test in FindTests():\n RunTest(test)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "step-3": "<mask token>\nGOLDEN_CASES_DIR = 'src/googleapis/codegen/testdata/golden'\nGOLDEN_DISCOVERY_DIR = 'src/googleapis/codegen/testdata/golden_discovery'\nVERBOSE = False\nTest = namedtuple('Test', ['language', 'variant', 'input', 'options',\n 'golden_file'])\n\n\ndef FindTests():\n \"\"\"Finds golden files and returns Test cases for each.\"\"\"\n for root, _, files in os.walk(GOLDEN_CASES_DIR):\n path_parts = root.split('/')\n if path_parts[-3] == 'golden':\n language = path_parts[-2]\n variant = path_parts[-1]\n for golden_file in files:\n input, _ = golden_file.split('.')\n options = None\n if input.endswith('_monolithic'):\n input = input[0:-11]\n options = ['--monolithic_source_name=sink']\n yield Test(language=language, variant=variant, input=input,\n options=options, golden_file=os.path.join(root,\n golden_file))\n\n\ndef Generate(language, variant, input, options, out_file):\n cmd = ['python', 'src/googleapis/codegen/generate_library.py', \n '--input=%s/%s.json' % (GOLDEN_DISCOVERY_DIR, input), \n '--language=%s' % language, '--language_variant=%s' % variant,\n '--output_format=txt', '--output_file=%s' % out_file]\n if options:\n cmd.extend(options)\n try:\n if VERBOSE:\n print('generate cmd: %s' % ' '.join(cmd))\n subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)\n except subprocess.CalledProcessError as e:\n msg = '(%s, %s, %s, %s)' % (language, variant, input, options)\n print('FAIL: generate(%s), cmd=[%s]' % (msg, ' '.join(cmd)))\n return False\n return True\n\n\ndef RunTest(test):\n out_file = '/tmp/%s.new' % test.golden_file.split('/')[-1]\n if Generate(test.language, test.variant, test.input, test.options, 
out_file\n ):\n cmd = ['diff', '--brief', test.golden_file, out_file]\n try:\n subprocess.check_call(cmd, stderr=sys.stderr)\n print('PASS: %s, %s, %s, %s' % (test.language, test.variant,\n test.input, test.options))\n except subprocess.CalledProcessError as e:\n print('FAIL: %s' % str(test))\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n src_path = os.path.join(os.getcwd(), 'src')\n python_path = os.environ.get('PYTHONPATH')\n if python_path:\n os.environ['PYTHONPATH'] = '%s:%s' % (src_path, python_path)\n else:\n os.environ['PYTHONPATH'] = src_path\n for test in FindTests():\n RunTest(test)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "step-4": "<mask token>\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom collections import namedtuple\nimport os\nimport subprocess\nimport sys\nGOLDEN_CASES_DIR = 'src/googleapis/codegen/testdata/golden'\nGOLDEN_DISCOVERY_DIR = 'src/googleapis/codegen/testdata/golden_discovery'\nVERBOSE = False\nTest = namedtuple('Test', ['language', 'variant', 'input', 'options',\n 'golden_file'])\n\n\ndef FindTests():\n \"\"\"Finds golden files and returns Test cases for each.\"\"\"\n for root, _, files in os.walk(GOLDEN_CASES_DIR):\n path_parts = root.split('/')\n if path_parts[-3] == 'golden':\n language = path_parts[-2]\n variant = path_parts[-1]\n for golden_file in files:\n input, _ = golden_file.split('.')\n options = None\n if input.endswith('_monolithic'):\n input = input[0:-11]\n options = ['--monolithic_source_name=sink']\n yield Test(language=language, variant=variant, input=input,\n options=options, golden_file=os.path.join(root,\n golden_file))\n\n\ndef Generate(language, variant, input, options, out_file):\n cmd = ['python', 'src/googleapis/codegen/generate_library.py', \n '--input=%s/%s.json' % (GOLDEN_DISCOVERY_DIR, input), \n '--language=%s' % language, '--language_variant=%s' % variant,\n 
'--output_format=txt', '--output_file=%s' % out_file]\n if options:\n cmd.extend(options)\n try:\n if VERBOSE:\n print('generate cmd: %s' % ' '.join(cmd))\n subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)\n except subprocess.CalledProcessError as e:\n msg = '(%s, %s, %s, %s)' % (language, variant, input, options)\n print('FAIL: generate(%s), cmd=[%s]' % (msg, ' '.join(cmd)))\n return False\n return True\n\n\ndef RunTest(test):\n out_file = '/tmp/%s.new' % test.golden_file.split('/')[-1]\n if Generate(test.language, test.variant, test.input, test.options, out_file\n ):\n cmd = ['diff', '--brief', test.golden_file, out_file]\n try:\n subprocess.check_call(cmd, stderr=sys.stderr)\n print('PASS: %s, %s, %s, %s' % (test.language, test.variant,\n test.input, test.options))\n except subprocess.CalledProcessError as e:\n print('FAIL: %s' % str(test))\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n src_path = os.path.join(os.getcwd(), 'src')\n python_path = os.environ.get('PYTHONPATH')\n if python_path:\n os.environ['PYTHONPATH'] = '%s:%s' % (src_path, python_path)\n else:\n os.environ['PYTHONPATH'] = src_path\n for test in FindTests():\n RunTest(test)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "step-5": "\"\"\"Run golden output tests.\n\nThe golden tests are a convenient way to make sure that a \"small\" change\ndoes not break anyone else.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport os\nimport subprocess\nimport sys\n\n\nGOLDEN_CASES_DIR = 'src/googleapis/codegen/testdata/golden'\nGOLDEN_DISCOVERY_DIR = 'src/googleapis/codegen/testdata/golden_discovery'\nVERBOSE = False\n\nTest = namedtuple('Test', [\n 'language',\n 'variant',\n 'input',\n 'options',\n 'golden_file'])\n\n\ndef FindTests():\n \"\"\"Finds golden files and returns Test cases for each.\"\"\"\n for root, _, 
files in os.walk(GOLDEN_CASES_DIR):\n path_parts = root.split('/')\n if path_parts[-3] == 'golden':\n language = path_parts[-2]\n variant = path_parts[-1]\n for golden_file in files:\n input, _ = golden_file.split('.')\n options = None\n if input.endswith('_monolithic'):\n input = input[0:-11]\n options = ['--monolithic_source_name=sink'] # pure hackery\n yield Test(\n language = language,\n variant = variant,\n input = input,\n options = options,\n golden_file = os.path.join(root, golden_file))\n\n\ndef Generate(language, variant, input, options, out_file):\n cmd = [\n 'python',\n 'src/googleapis/codegen/generate_library.py',\n '--input=%s/%s.json' % (GOLDEN_DISCOVERY_DIR, input),\n '--language=%s' % language,\n '--language_variant=%s' % variant,\n '--output_format=txt',\n '--output_file=%s' % out_file,\n ]\n if options:\n cmd.extend(options)\n try:\n if VERBOSE:\n print('generate cmd: %s' % ' '.join(cmd))\n subprocess.check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)\n except subprocess.CalledProcessError as e:\n msg = '(%s, %s, %s, %s)' % (language, variant, input, options)\n print('FAIL: generate(%s), cmd=[%s]' % (msg, ' '.join(cmd)))\n return False\n return True\n\n\ndef RunTest(test):\n # Fix this\n out_file = '/tmp/%s.new' % test.golden_file.split('/')[-1]\n if Generate(test.language, test.variant, test.input, test.options, out_file):\n cmd = ['diff', '--brief', test.golden_file, out_file]\n try:\n subprocess.check_call(cmd, stderr=sys.stderr)\n print('PASS: %s, %s, %s, %s' % (test.language, test.variant, test.input, test.options))\n except subprocess.CalledProcessError as e:\n print('FAIL: %s' % str(test))\n\n\ndef main(argv):\n if len(argv) > 1:\n raise app.UsageError('Too many command-line arguments.')\n\n src_path = os.path.join(os.getcwd(), 'src')\n python_path = os.environ.get('PYTHONPATH')\n if python_path:\n os.environ['PYTHONPATH'] = '%s:%s' % (src_path, python_path)\n else:\n os.environ['PYTHONPATH'] = src_path\n\n for test in FindTests():\n 
RunTest(test)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
# -*- coding: utf-8 -*- # Generated by Django 1.9.4 on 2016-04-12 14:41 from __future__ import unicode_literals import datetime from django.db import migrations, models from django.utils.timezone import utc class Migration(migrations.Migration): dependencies = [ ('ashesiundergraduate', '0016_orphanage'), ] operations = [ migrations.AlterField( model_name='orphanage', name='contact_person_phone_number', field=models.CharField(help_text='Enter phone number in the format +233xxxxxxxxx', max_length=15, verbose_name='contact person phone number'), ), migrations.AlterField( model_name='personalinformation', name='date_of_birth', field=models.DateField(default=datetime.datetime(2016, 4, 12, 14, 40, 34, 67485, tzinfo=utc), verbose_name='date of birth'), preserve_default=False, ), migrations.AlterField( model_name='personalinformation', name='gender', field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='M', max_length=1, verbose_name='gender'), preserve_default=False, ), migrations.AlterField( model_name='personalinformation', name='photo_height', field=models.CharField(default=2, max_length=5, verbose_name='photo height'), preserve_default=False, ), migrations.AlterField( model_name='personalinformation', name='photo_width', field=models.CharField(default=2, max_length=5, verbose_name='photo width'), preserve_default=False, ), migrations.AlterField( model_name='personalinformation', name='year_applied', field=models.CharField(default=1994, max_length=4, verbose_name='year applied'), preserve_default=False, ), ]
normal
{ "blob_id": "5f2110bcab465a85ad7db1b0e01a882b3ed305a5", "index": 2876, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('ashesiundergraduate', '0016_orphanage')]\n operations = [migrations.AlterField(model_name='orphanage', name=\n 'contact_person_phone_number', field=models.CharField(help_text=\n 'Enter phone number in the format +233xxxxxxxxx', max_length=15,\n verbose_name='contact person phone number')), migrations.AlterField\n (model_name='personalinformation', name='date_of_birth', field=\n models.DateField(default=datetime.datetime(2016, 4, 12, 14, 40, 34,\n 67485, tzinfo=utc), verbose_name='date of birth'), preserve_default\n =False), migrations.AlterField(model_name='personalinformation',\n name='gender', field=models.CharField(choices=[('M', 'Male'), ('F',\n 'Female')], default='M', max_length=1, verbose_name='gender'),\n preserve_default=False), migrations.AlterField(model_name=\n 'personalinformation', name='photo_height', field=models.CharField(\n default=2, max_length=5, verbose_name='photo height'),\n preserve_default=False), migrations.AlterField(model_name=\n 'personalinformation', name='photo_width', field=models.CharField(\n default=2, max_length=5, verbose_name='photo width'),\n preserve_default=False), migrations.AlterField(model_name=\n 'personalinformation', name='year_applied', field=models.CharField(\n default=1994, max_length=4, verbose_name='year applied'),\n preserve_default=False)]\n", "step-4": "from __future__ import unicode_literals\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n dependencies = [('ashesiundergraduate', '0016_orphanage')]\n operations = [migrations.AlterField(model_name='orphanage', name=\n 'contact_person_phone_number', field=models.CharField(help_text=\n 
'Enter phone number in the format +233xxxxxxxxx', max_length=15,\n verbose_name='contact person phone number')), migrations.AlterField\n (model_name='personalinformation', name='date_of_birth', field=\n models.DateField(default=datetime.datetime(2016, 4, 12, 14, 40, 34,\n 67485, tzinfo=utc), verbose_name='date of birth'), preserve_default\n =False), migrations.AlterField(model_name='personalinformation',\n name='gender', field=models.CharField(choices=[('M', 'Male'), ('F',\n 'Female')], default='M', max_length=1, verbose_name='gender'),\n preserve_default=False), migrations.AlterField(model_name=\n 'personalinformation', name='photo_height', field=models.CharField(\n default=2, max_length=5, verbose_name='photo height'),\n preserve_default=False), migrations.AlterField(model_name=\n 'personalinformation', name='photo_width', field=models.CharField(\n default=2, max_length=5, verbose_name='photo width'),\n preserve_default=False), migrations.AlterField(model_name=\n 'personalinformation', name='year_applied', field=models.CharField(\n default=1994, max_length=4, verbose_name='year applied'),\n preserve_default=False)]\n", "step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.4 on 2016-04-12 14:41\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ashesiundergraduate', '0016_orphanage'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='orphanage',\n name='contact_person_phone_number',\n field=models.CharField(help_text='Enter phone number in the format +233xxxxxxxxx', max_length=15, verbose_name='contact person phone number'),\n ),\n migrations.AlterField(\n model_name='personalinformation',\n name='date_of_birth',\n field=models.DateField(default=datetime.datetime(2016, 4, 12, 14, 40, 34, 67485, tzinfo=utc), verbose_name='date of birth'),\n preserve_default=False,\n ),\n 
migrations.AlterField(\n model_name='personalinformation',\n name='gender',\n field=models.CharField(choices=[('M', 'Male'), ('F', 'Female')], default='M', max_length=1, verbose_name='gender'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='personalinformation',\n name='photo_height',\n field=models.CharField(default=2, max_length=5, verbose_name='photo height'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='personalinformation',\n name='photo_width',\n field=models.CharField(default=2, max_length=5, verbose_name='photo width'),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='personalinformation',\n name='year_applied',\n field=models.CharField(default=1994, max_length=4, verbose_name='year applied'),\n preserve_default=False,\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from .FactorWarData import Get_FactorWar_Data
normal
{ "blob_id": "5aa55a96e414ad6b3ceebbcbd71c23a1fd69f0d1", "index": 6400, "step-1": "<mask token>\n", "step-2": "from .FactorWarData import Get_FactorWar_Data\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
version https://git-lfs.github.com/spec/v1 oid sha256:26be7fc8be181fad8e821179cce6be14e37a5f303e532e6fb00f848d5f33fe41 size 752
normal
{ "blob_id": "0f37baf3b08ecf7bd8db43ecc2f29c3ca6e00af0", "index": 3089, "step-1": "version https://git-lfs.github.com/spec/v1\noid sha256:26be7fc8be181fad8e821179cce6be14e37a5f303e532e6fb00f848d5f33fe41\nsize 752\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
import math def sexpr_key(s_expr): return s_expr.strip('(').split(' ')[0] def expr_key(expr): return expr.split(' ')[0] def expr_data(expr): return expr.split(' ')[1:] def list_key(_list): if type(_list) is type(list()): return _list[0] else: return expr_key(_list) def list_data(_list): if type(_list) is type(list()): return _list[1] else: temp = expr_data(_list) if temp: return temp[0] else: return [] def extracted_data(string): t = string.split(' ') t.pop(0) return t def mean(data): if data: return float(sum(data))/len(data) else: return 0 def variance(data): if data: m_mean = mean(data) return sum([math.pow((i - m_mean), 2) for i in data])/len(data) else: return 0
normal
{ "blob_id": "18789b5106d4be8a02197b165e16a74c08a58c66", "index": 8578, "step-1": "<mask token>\n\n\ndef sexpr_key(s_expr):\n return s_expr.strip('(').split(' ')[0]\n\n\n<mask token>\n\n\ndef list_data(_list):\n if type(_list) is type(list()):\n return _list[1]\n else:\n temp = expr_data(_list)\n if temp:\n return temp[0]\n else:\n return []\n\n\n<mask token>\n\n\ndef mean(data):\n if data:\n return float(sum(data)) / len(data)\n else:\n return 0\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef sexpr_key(s_expr):\n return s_expr.strip('(').split(' ')[0]\n\n\ndef expr_key(expr):\n return expr.split(' ')[0]\n\n\ndef expr_data(expr):\n return expr.split(' ')[1:]\n\n\ndef list_key(_list):\n if type(_list) is type(list()):\n return _list[0]\n else:\n return expr_key(_list)\n\n\ndef list_data(_list):\n if type(_list) is type(list()):\n return _list[1]\n else:\n temp = expr_data(_list)\n if temp:\n return temp[0]\n else:\n return []\n\n\n<mask token>\n\n\ndef mean(data):\n if data:\n return float(sum(data)) / len(data)\n else:\n return 0\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef sexpr_key(s_expr):\n return s_expr.strip('(').split(' ')[0]\n\n\ndef expr_key(expr):\n return expr.split(' ')[0]\n\n\ndef expr_data(expr):\n return expr.split(' ')[1:]\n\n\ndef list_key(_list):\n if type(_list) is type(list()):\n return _list[0]\n else:\n return expr_key(_list)\n\n\ndef list_data(_list):\n if type(_list) is type(list()):\n return _list[1]\n else:\n temp = expr_data(_list)\n if temp:\n return temp[0]\n else:\n return []\n\n\n<mask token>\n\n\ndef mean(data):\n if data:\n return float(sum(data)) / len(data)\n else:\n return 0\n\n\ndef variance(data):\n if data:\n m_mean = mean(data)\n return sum([math.pow(i - m_mean, 2) for i in data]) / len(data)\n else:\n return 0\n", "step-4": "<mask token>\n\n\ndef sexpr_key(s_expr):\n return s_expr.strip('(').split(' ')[0]\n\n\ndef expr_key(expr):\n return expr.split(' ')[0]\n\n\ndef expr_data(expr):\n return expr.split(' 
')[1:]\n\n\ndef list_key(_list):\n if type(_list) is type(list()):\n return _list[0]\n else:\n return expr_key(_list)\n\n\ndef list_data(_list):\n if type(_list) is type(list()):\n return _list[1]\n else:\n temp = expr_data(_list)\n if temp:\n return temp[0]\n else:\n return []\n\n\ndef extracted_data(string):\n t = string.split(' ')\n t.pop(0)\n return t\n\n\ndef mean(data):\n if data:\n return float(sum(data)) / len(data)\n else:\n return 0\n\n\ndef variance(data):\n if data:\n m_mean = mean(data)\n return sum([math.pow(i - m_mean, 2) for i in data]) / len(data)\n else:\n return 0\n", "step-5": "import math\n\ndef sexpr_key(s_expr):\n return s_expr.strip('(').split(' ')[0]\n\ndef expr_key(expr):\n return expr.split(' ')[0]\n\ndef expr_data(expr):\n return expr.split(' ')[1:]\n\ndef list_key(_list):\n if type(_list) is type(list()):\n return _list[0]\n else:\n return expr_key(_list)\n \n\ndef list_data(_list):\n if type(_list) is type(list()):\n return _list[1]\n else:\n temp = expr_data(_list)\n if temp:\n return temp[0]\n else:\n return []\n\ndef extracted_data(string):\n t = string.split(' ')\n t.pop(0)\n return t\n\n\ndef mean(data):\n if data:\n return float(sum(data))/len(data)\n else:\n return 0\n\ndef variance(data):\n if data:\n m_mean = mean(data)\n return sum([math.pow((i - m_mean), 2) for i in data])/len(data)\n else:\n return 0\n \n", "step-ids": [ 3, 6, 7, 8, 10 ] }
[ 3, 6, 7, 8, 10 ]
# Overview file #import python classes import numpy as np import random as rn import math import matplotlib.pyplot as plt import pylab from mpl_toolkits.mplot3d import Axes3D #import self produced classes import forcemodule as fm import init_sys # independent parameters dt = 0.004 N=2048 lpnum = 1000 density = 0.85 temp = 0.8 # Loading initial conditions mom = init_sys.init_mom(N, temp) pos, l = init_sys.init_pos(N, density) forces = init_sys.init_forc(N) pot = init_sys.init_pot(N) print N, 'N' # Iteration Verlet method forces, pot = fm.calc_forces(pos,forces,pot,l,[N]) formersummom = 0 for lp in range(lpnum): mom = mom + forces*0.5*dt pos = (pos + mom*dt) % l # % l means modulo of l, hence it adds/subtracts n*l untill 0<pos<l forces, pot = fm.calc_forces(pos,forces,pot,l,[N]) mom = mom + forces*0.5*dt Ken = np.sum(mom*mom*0.5, axis=1) toten = sum(Ken) - sum(pot) print toten, np.sum(mom) ''' fig = pylab.figure() ax = Axes3D(fig) ax.scatter(pos[:,0],pos[:,1],pos[:,2],c='b') ax.set_xlabel('X Label') ax.set_ylabel('Y Label') ax.set_zlabel('Z Label') plt.show() ''' # Plotting the positions
normal
{ "blob_id": "63c214d9e831356345ba2eee68634af36964dcff", "index": 550, "step-1": "# Overview file\n\n#import python classes\nimport numpy as np\nimport random as rn\nimport math\nimport matplotlib.pyplot as plt\nimport pylab\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n#import self produced classes\nimport forcemodule as fm\nimport init_sys\n\n\n# independent parameters\ndt = 0.004\nN=2048\nlpnum = 1000\ndensity = 0.85\ntemp = 0.8\n\n\n# Loading initial conditions\nmom = init_sys.init_mom(N, temp) \npos, l = init_sys.init_pos(N, density) \nforces = init_sys.init_forc(N)\npot = init_sys.init_pot(N)\n\nprint N, 'N'\n\n\n\n\n# Iteration Verlet method\n\nforces, pot = fm.calc_forces(pos,forces,pot,l,[N])\nformersummom = 0\nfor lp in range(lpnum):\n mom = mom + forces*0.5*dt\n pos = (pos + mom*dt) % l # % l means modulo of l, hence it adds/subtracts n*l untill 0<pos<l\n forces, pot = fm.calc_forces(pos,forces,pot,l,[N])\n mom = mom + forces*0.5*dt\n Ken = np.sum(mom*mom*0.5, axis=1)\n toten = sum(Ken) - sum(pot)\n print toten, np.sum(mom)\n'''\n fig = pylab.figure()\n ax = Axes3D(fig) \n ax.scatter(pos[:,0],pos[:,1],pos[:,2],c='b')\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()\n '''\n\n\n\n\n# Plotting the positions\n\n\n\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
# # In development by Jihye Sofia Seo https://www.linkedin.com/in/jihyeseo # forked from the code of Al Sweigart # http://inventwithpython.com/pygame/chapter10.html # whose books are very helpful for learning Python and PyGame. Many thanks! # Main change is that his version uses flood fill algorithm, which could not run for large boards. # This file modified the algorithm. # # Flood-It is an NP hard problem http://arxiv.org/abs/1001.4420 for 3 colors or more. # The goal of this project is to find an efficient algorithm for autoplay. # # Any comments are welcome at [email protected] # upload: May 7 2016 Berlin Germany # import random, sys, webbrowser, copy, pygame from pygame.locals import * #sys.setrecursionlimit(1000000) #FPS = 30 WINDOWWIDTH = 1920 WINDOWHEIGHT = 1000 boxSize = 20 PALETTEGAPSIZE = 5 PALETTESIZE = 30 boardWidth = 93 boardHeight = 49 # Creates a board data structure with random colors for each box. board = [] conqueredAt = [[False for y in range(boardHeight)] for x in range(boardWidth)] neverQueue = [[False for y in range(boardHeight)] for x in range(boardWidth)] conqueredAt[0][0] = True class Queue: def __init__(self): self.items = [] def isEmpty(self): return self.items == [] def enqueue(self, item): self.items.insert(0,item) def dequeue(self): return self.items.pop() def size(self): return len(self.items) def buildQueue(): # add only boundaries floodQueue = Queue() for x in range(boardWidth): for y in range(boardHeight): if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True): noFrontier = True if (x > 0) : noFrontier = noFrontier & (conqueredAt[x-1][y]) if (x < boardWidth - 1): noFrontier = noFrontier & (conqueredAt[x+1][y]) if (y > 0): noFrontier = noFrontier & (conqueredAt[x][y-1]) if (y < boardHeight - 1): noFrontier = noFrontier & (conqueredAt[x][y+1]) if noFrontier : neverQueue[x][y] = True else: floodQueue.enqueue([x, y]) return floodQueue # R G B WHITE = (255, 255, 255) DARKGRAY = ( 70, 70, 70) BLACK = ( 0, 0, 0) RED = (255, 0, 0) 
GREEN = ( 0, 255, 0) BLUE = ( 0, 0, 255) YELLOW = (255, 255, 0) ORANGE = (255, 128, 0) PURPLE = (255, 0, 255) # The first color in each scheme is the background color, the next six are the palette colors. COLORSCHEMES = ((150, 200, 255), (97, 215, 164) , #lightGr (0, 125, 50) ,#darkGr (23, 149, 195) , # light ocean (81, 85 , 141), # lightPur (147, 3, 167) , # purple (241, 109, 149), # jindalle (255, 180, 115), # tangerine (166, 147, 0), # tangerine? (183, 182, 208), # gray (68, 0, 0) # drak grey ) bgColor = COLORSCHEMES[0] paletteColors = COLORSCHEMES[1:] def main(): global FPSCLOCK, DISPLAYSURF pygame.init() DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT)) pygame.display.set_caption('Flood it') generateRandomBoard(boardWidth, boardHeight) lastPaletteClicked = None while True: # main game loop paletteClicked = None # Draw the screen. DISPLAYSURF.fill(bgColor) drawBoard() drawPalettes() pygame.display.update() for event in pygame.event.get(KEYUP): # get all the KEYUP events if event.key == K_ESCAPE: pygame.quit() # terminate if the KEYUP event was for the Esc key sys.exit() elif event.key == K_0: paletteClicked = 9 elif event.key == K_1: paletteClicked = 0 elif event.key == K_2: paletteClicked = 1 elif event.key == K_3: paletteClicked = 2 elif event.key == K_4: paletteClicked = 3 elif event.key == K_5: paletteClicked = 4 elif event.key == K_6: paletteClicked = 5 elif event.key == K_7: paletteClicked = 6 elif event.key == K_8: paletteClicked = 7 elif event.key == K_9: paletteClicked = 8 # pygame.event.post(event) # put the other KEYUP event objects back paletteClicked = random.randint(0,9) pygame.time.wait(50) if paletteClicked != None and paletteClicked != lastPaletteClicked: # a palette button was clicked that is different from the # last palette button clicked (this check prevents the player # from accidentally clicking the same palette twice) lastPaletteClicked = paletteClicked #if board[0][0] != paletteClicked : floodFill(board[0][0], 
paletteClicked, buildQueue()) drawBoard() pygame.display.update() # FPSCLOCK.tick(FPS) # pygame.display.update() #FPSCLOCK.tick(FPS) def generateRandomBoard(width, height): for x in range(width): column = [] for y in range(height): column.append(random.randint(0, len(paletteColors) - 1)) board.append(column) def drawBoard(): for x in range(boardWidth): for y in range(boardHeight): left, top = leftTopPixelCoordOfBox(x, y) pygame.draw.rect(DISPLAYSURF, (paletteColors[board[x][y]]), (left, top, boxSize, boxSize)) DISPLAYSURF.blit(DISPLAYSURF, (0, 0)) def drawPalettes(): # Draws the six color palettes at the left of the screen. numColors = len(paletteColors) textSize = 30 font = pygame.font.Font(None, textSize) for i in range(numColors): top = 10 + (i * PALETTESIZE) + (i * PALETTEGAPSIZE) left = 10 pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top, PALETTESIZE, PALETTESIZE)) textImg = font.render( str((i+1) % 10), 1, bgColor) DISPLAYSURF.blit( textImg, (left+10 +0*(PALETTESIZE/2-textSize/2),top+7 +0*(PALETTESIZE/2-textSize/2))) def floodFill(teamColor, newColor, queue): while(queue.isEmpty() == False): checkHere = queue.dequeue() (x,y) = (checkHere[0],checkHere[1]) board[x][y] = newColor conqueredAt[x][y] = True if x > 0 : (X,Y) = (x-1,y) if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): queue.enqueue([X, Y]) # on box to the left if x < boardWidth - 1: (X,Y) = (x+1,y) if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): queue.enqueue([X, Y]) # on box to the right if y > 0: (X,Y) = (x,y-1) if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): queue.enqueue([X, Y]) # on box to up if y < boardHeight - 1: (X,Y) = (x,y+1) if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): queue.enqueue([X, Y]) # on box to down for x in range(boardWidth): for y in range(boardHeight): if conqueredAt[x][y] == True : board[x][y] = newColor def leftTopPixelCoordOfBox(boxx, boxy): # Returns the x and y of the left-topmost pixel of the xth & yth 
box. xmargin = int((WINDOWWIDTH - (boardWidth * boxSize)) / 2 + 23) ymargin = int((WINDOWHEIGHT - (boardHeight * boxSize)) / 2 ) return (boxx * boxSize + xmargin, boxy * boxSize + ymargin) if __name__ == '__main__': main()
normal
{ "blob_id": "ec200ee66e3c4a93bbd8e75f0e8b715f54b5479d", "index": 6781, "step-1": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n floodQueue.enqueue([x, y])\n return floodQueue\n\n\n<mask token>\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != 
lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\n<mask token>\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\n<mask token>\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n 
floodQueue.enqueue([x, y])\n return floodQueue\n\n\n<mask token>\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\ndef generateRandomBoard(width, height):\n for x in range(width):\n column = []\n for y in range(height):\n column.append(random.randint(0, len(paletteColors) - 1))\n board.append(column)\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, 
PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\n<mask token>\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n floodQueue.enqueue([x, y])\n return floodQueue\n\n\n<mask token>\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif 
event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\ndef generateRandomBoard(width, height):\n for x in range(width):\n column = []\n for y in range(height):\n column.append(random.randint(0, len(paletteColors) - 1))\n board.append(column)\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\ndef floodFill(teamColor, newColor, queue):\n while queue.isEmpty() == False:\n checkHere = queue.dequeue()\n x, y = checkHere[0], checkHere[1]\n board[x][y] = newColor\n conqueredAt[x][y] = True\n if x > 0:\n X, Y = x - 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if x < boardWidth - 1:\n X, Y = x + 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y > 0:\n X, Y = x, y - 1\n if (board[X][Y] == teamColor) & 
(conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y < boardHeight - 1:\n X, Y = x, y + 1\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n for x in range(boardWidth):\n for y in range(boardHeight):\n if conqueredAt[x][y] == True:\n board[x][y] = newColor\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\n<mask token>\n", "step-4": "import random, sys, webbrowser, copy, pygame\nfrom pygame.locals import *\nWINDOWWIDTH = 1920\nWINDOWHEIGHT = 1000\nboxSize = 20\nPALETTEGAPSIZE = 5\nPALETTESIZE = 30\nboardWidth = 93\nboardHeight = 49\nboard = []\nconqueredAt = [[(False) for y in range(boardHeight)] for x in range(boardWidth)\n ]\nneverQueue = [[(False) for y in range(boardHeight)] for x in range(boardWidth)]\nconqueredAt[0][0] = True\n\n\nclass Queue:\n\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def enqueue(self, item):\n self.items.insert(0, item)\n\n def dequeue(self):\n return self.items.pop()\n\n def size(self):\n return len(self.items)\n\n\ndef buildQueue():\n floodQueue = Queue()\n for x in range(boardWidth):\n for y in range(boardHeight):\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True):\n noFrontier = True\n if x > 0:\n noFrontier = noFrontier & conqueredAt[x - 1][y]\n if x < boardWidth - 1:\n noFrontier = noFrontier & conqueredAt[x + 1][y]\n if y > 0:\n noFrontier = noFrontier & conqueredAt[x][y - 1]\n if y < boardHeight - 1:\n noFrontier = noFrontier & conqueredAt[x][y + 1]\n if noFrontier:\n neverQueue[x][y] = True\n else:\n floodQueue.enqueue([x, y])\n return floodQueue\n\n\nWHITE = 255, 255, 255\nDARKGRAY = 70, 70, 70\nBLACK = 0, 0, 0\nRED = 255, 0, 0\nGREEN = 0, 255, 0\nBLUE = 0, 0, 255\nYELLOW = 255, 255, 0\nORANGE = 255, 128, 0\nPURPLE = 255, 0, 255\nCOLORSCHEMES 
= (150, 200, 255), (97, 215, 164), (0, 125, 50), (23, 149, 195), (\n 81, 85, 141), (147, 3, 167), (241, 109, 149), (255, 180, 115), (166, 147, 0\n ), (183, 182, 208), (68, 0, 0)\nbgColor = COLORSCHEMES[0]\npaletteColors = COLORSCHEMES[1:]\n\n\ndef main():\n global FPSCLOCK, DISPLAYSURF\n pygame.init()\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n pygame.display.set_caption('Flood it')\n generateRandomBoard(boardWidth, boardHeight)\n lastPaletteClicked = None\n while True:\n paletteClicked = None\n DISPLAYSURF.fill(bgColor)\n drawBoard()\n drawPalettes()\n pygame.display.update()\n for event in pygame.event.get(KEYUP):\n if event.key == K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == K_0:\n paletteClicked = 9\n elif event.key == K_1:\n paletteClicked = 0\n elif event.key == K_2:\n paletteClicked = 1\n elif event.key == K_3:\n paletteClicked = 2\n elif event.key == K_4:\n paletteClicked = 3\n elif event.key == K_5:\n paletteClicked = 4\n elif event.key == K_6:\n paletteClicked = 5\n elif event.key == K_7:\n paletteClicked = 6\n elif event.key == K_8:\n paletteClicked = 7\n elif event.key == K_9:\n paletteClicked = 8\n paletteClicked = random.randint(0, 9)\n pygame.time.wait(50)\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\n lastPaletteClicked = paletteClicked\n floodFill(board[0][0], paletteClicked, buildQueue())\n drawBoard()\n pygame.display.update()\n\n\ndef generateRandomBoard(width, height):\n for x in range(width):\n column = []\n for y in range(height):\n column.append(random.randint(0, len(paletteColors) - 1))\n board.append(column)\n\n\ndef drawBoard():\n for x in range(boardWidth):\n for y in range(boardHeight):\n left, top = leftTopPixelCoordOfBox(x, y)\n pygame.draw.rect(DISPLAYSURF, paletteColors[board[x][y]], (left,\n top, boxSize, boxSize))\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\n\n\ndef drawPalettes():\n numColors = len(paletteColors)\n textSize = 30\n font = pygame.font.Font(None, 
textSize)\n for i in range(numColors):\n top = 10 + i * PALETTESIZE + i * PALETTEGAPSIZE\n left = 10\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top,\n PALETTESIZE, PALETTESIZE))\n textImg = font.render(str((i + 1) % 10), 1, bgColor)\n DISPLAYSURF.blit(textImg, (left + 10 + 0 * (PALETTESIZE / 2 - \n textSize / 2), top + 7 + 0 * (PALETTESIZE / 2 - textSize / 2)))\n\n\ndef floodFill(teamColor, newColor, queue):\n while queue.isEmpty() == False:\n checkHere = queue.dequeue()\n x, y = checkHere[0], checkHere[1]\n board[x][y] = newColor\n conqueredAt[x][y] = True\n if x > 0:\n X, Y = x - 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if x < boardWidth - 1:\n X, Y = x + 1, y\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y > 0:\n X, Y = x, y - 1\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n if y < boardHeight - 1:\n X, Y = x, y + 1\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False):\n queue.enqueue([X, Y])\n for x in range(boardWidth):\n for y in range(boardHeight):\n if conqueredAt[x][y] == True:\n board[x][y] = newColor\n\n\ndef leftTopPixelCoordOfBox(boxx, boxy):\n xmargin = int((WINDOWWIDTH - boardWidth * boxSize) / 2 + 23)\n ymargin = int((WINDOWHEIGHT - boardHeight * boxSize) / 2)\n return boxx * boxSize + xmargin, boxy * boxSize + ymargin\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#\r\n# In development by Jihye Sofia Seo https://www.linkedin.com/in/jihyeseo\r\n# forked from the code of Al Sweigart \r\n# http://inventwithpython.com/pygame/chapter10.html \r\n# whose books are very helpful for learning Python and PyGame. Many thanks!\r\n# Main change is that his version uses flood fill algorithm, which could not run for large boards.\r\n# This file modified the algorithm. \r\n#\r\n# Flood-It is an NP hard problem http://arxiv.org/abs/1001.4420 for 3 colors or more. 
\r\n# The goal of this project is to find an efficient algorithm for autoplay.\r\n#\r\n# Any comments are welcome at [email protected] \r\n# upload: May 7 2016 Berlin Germany\r\n#\r\n\r\nimport random, sys, webbrowser, copy, pygame\r\nfrom pygame.locals import *\r\n \r\n#sys.setrecursionlimit(1000000)\r\n \r\n#FPS = 30\r\nWINDOWWIDTH = 1920\r\nWINDOWHEIGHT = 1000\r\nboxSize = 20\r\nPALETTEGAPSIZE = 5\r\nPALETTESIZE = 30\r\n \r\nboardWidth = 93\r\nboardHeight = 49 \r\n\r\n# Creates a board data structure with random colors for each box.\r\nboard = []\r\n\r\n\r\nconqueredAt = [[False for y in range(boardHeight)] for x in range(boardWidth)] \r\nneverQueue = [[False for y in range(boardHeight)] for x in range(boardWidth)] \r\n \r\nconqueredAt[0][0] = True \r\n\r\n\r\nclass Queue:\r\n def __init__(self):\r\n self.items = []\r\n\r\n def isEmpty(self):\r\n return self.items == []\r\n\r\n def enqueue(self, item):\r\n self.items.insert(0,item)\r\n\r\n def dequeue(self):\r\n return self.items.pop()\r\n\r\n def size(self):\r\n return len(self.items)\r\n\r\n\r\ndef buildQueue(): # add only boundaries\r\n floodQueue = Queue() \r\n for x in range(boardWidth):\r\n for y in range(boardHeight):\r\n if (neverQueue[x][y] == False) & (conqueredAt[x][y] == True): \r\n noFrontier = True \r\n if (x > 0) :\r\n noFrontier = noFrontier & (conqueredAt[x-1][y]) \r\n if (x < boardWidth - 1):\r\n noFrontier = noFrontier & (conqueredAt[x+1][y])\r\n if (y > 0):\r\n noFrontier = noFrontier & (conqueredAt[x][y-1])\r\n if (y < boardHeight - 1):\r\n noFrontier = noFrontier & (conqueredAt[x][y+1]) \r\n if noFrontier :\r\n neverQueue[x][y] = True\r\n else: \r\n floodQueue.enqueue([x, y]) \r\n \r\n return floodQueue\r\n \r\n# R G B\r\nWHITE = (255, 255, 255)\r\nDARKGRAY = ( 70, 70, 70)\r\nBLACK = ( 0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = ( 0, 255, 0)\r\nBLUE = ( 0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nORANGE = (255, 128, 0)\r\nPURPLE = (255, 0, 255)\r\n# The first color in each scheme is the background 
color, the next six are the palette colors.\r\nCOLORSCHEMES = ((150, 200, 255), \r\n (97, 215, 164) , #lightGr \r\n (0, 125, 50) ,#darkGr\r\n (23, 149, 195) , # light ocean\r\n (81, 85 , 141), # lightPur\r\n (147, 3, 167) , # purple\r\n (241, 109, 149), # jindalle \r\n (255, 180, 115), # tangerine\r\n (166, 147, 0), # tangerine? \r\n (183, 182, 208), # gray\r\n (68, 0, 0) # drak grey\r\n )\r\nbgColor = COLORSCHEMES[0]\r\npaletteColors = COLORSCHEMES[1:]\r\n\r\ndef main():\r\n global FPSCLOCK, DISPLAYSURF\r\n\r\n pygame.init() \r\n DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\r\n\r\n pygame.display.set_caption('Flood it')\r\n generateRandomBoard(boardWidth, boardHeight)\r\n lastPaletteClicked = None\r\n\r\n while True: # main game loop\r\n paletteClicked = None\r\n\r\n # Draw the screen.\r\n DISPLAYSURF.fill(bgColor) \r\n drawBoard() \r\n drawPalettes()\r\n pygame.display.update()\r\n \r\n for event in pygame.event.get(KEYUP): # get all the KEYUP events\r\n if event.key == K_ESCAPE:\r\n pygame.quit() # terminate if the KEYUP event was for the Esc key\r\n sys.exit()\r\n elif event.key == K_0:\r\n paletteClicked = 9 \r\n elif event.key == K_1:\r\n paletteClicked = 0 \r\n elif event.key == K_2:\r\n paletteClicked = 1 \r\n elif event.key == K_3:\r\n paletteClicked = 2 \r\n elif event.key == K_4:\r\n paletteClicked = 3 \r\n elif event.key == K_5:\r\n paletteClicked = 4 \r\n elif event.key == K_6:\r\n paletteClicked = 5 \r\n elif event.key == K_7:\r\n paletteClicked = 6 \r\n elif event.key == K_8:\r\n paletteClicked = 7 \r\n elif event.key == K_9:\r\n paletteClicked = 8 \r\n # pygame.event.post(event) # put the other KEYUP event objects back\r\n \r\n paletteClicked = random.randint(0,9)\r\n pygame.time.wait(50)\r\n if paletteClicked != None and paletteClicked != lastPaletteClicked:\r\n # a palette button was clicked that is different from the\r\n # last palette button clicked (this check prevents the player\r\n # from accidentally clicking the same 
palette twice)\r\n lastPaletteClicked = paletteClicked\r\n #if board[0][0] != paletteClicked : \r\n floodFill(board[0][0], paletteClicked, buildQueue())\r\n drawBoard()\r\n pygame.display.update()\r\n # FPSCLOCK.tick(FPS) \r\n # pygame.display.update()\r\n #FPSCLOCK.tick(FPS)\r\n\r\ndef generateRandomBoard(width, height): \r\n for x in range(width):\r\n column = []\r\n for y in range(height): \r\n column.append(random.randint(0, len(paletteColors) - 1))\r\n board.append(column) \r\n\r\ndef drawBoard():\r\n for x in range(boardWidth):\r\n for y in range(boardHeight):\r\n left, top = leftTopPixelCoordOfBox(x, y) \r\n pygame.draw.rect(DISPLAYSURF, (paletteColors[board[x][y]]), (left, top, boxSize, boxSize))\r\n DISPLAYSURF.blit(DISPLAYSURF, (0, 0))\r\n\r\ndef drawPalettes():\r\n # Draws the six color palettes at the left of the screen.\r\n numColors = len(paletteColors)\r\n textSize = 30\r\n font = pygame.font.Font(None, textSize)\r\n for i in range(numColors):\r\n top = 10 + (i * PALETTESIZE) + (i * PALETTEGAPSIZE)\r\n left = 10\r\n pygame.draw.rect(DISPLAYSURF, paletteColors[i], (left, top, PALETTESIZE, PALETTESIZE))\r\n textImg = font.render( str((i+1) % 10), 1, bgColor)\r\n DISPLAYSURF.blit( textImg, (left+10 +0*(PALETTESIZE/2-textSize/2),top+7 +0*(PALETTESIZE/2-textSize/2)))\r\n \r\ndef floodFill(teamColor, newColor, queue): \r\n while(queue.isEmpty() == False):\r\n checkHere = queue.dequeue() \r\n (x,y) = (checkHere[0],checkHere[1])\r\n \r\n board[x][y] = newColor \r\n conqueredAt[x][y] = True \r\n \r\n if x > 0 :\r\n (X,Y) = (x-1,y) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to the left\r\n if x < boardWidth - 1:\r\n (X,Y) = (x+1,y) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to the right\r\n if y > 0:\r\n (X,Y) = (x,y-1) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to up\r\n if y < 
boardHeight - 1:\r\n (X,Y) = (x,y+1) \r\n if (board[X][Y] == teamColor) & (conqueredAt[X][Y] == False): \r\n queue.enqueue([X, Y]) # on box to down\r\n for x in range(boardWidth):\r\n for y in range(boardHeight):\r\n if conqueredAt[x][y] == True :\r\n board[x][y] = newColor \r\n\r\ndef leftTopPixelCoordOfBox(boxx, boxy):\r\n # Returns the x and y of the left-topmost pixel of the xth & yth box.\r\n xmargin = int((WINDOWWIDTH - (boardWidth * boxSize)) / 2 + 23)\r\n ymargin = int((WINDOWHEIGHT - (boardHeight * boxSize)) / 2 )\r\n return (boxx * boxSize + xmargin, boxy * boxSize + ymargin)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "step-ids": [ 11, 12, 13, 16, 17 ] }
[ 11, 12, 13, 16, 17 ]
import erequests from pyarc.base import RestException class ResultWrapper(object): def __init__(self, client, method, url): self.client = client self.method = method self.url = url self.response = None def get(self): if self.response is None: self.client.wait_all_requests_completed() if self.response.status_code >= 400: raise RestException(self.method, self.url, self.response.status_code, self.response.text) try: return self.response.json() except ValueError: return self.response.text _METHODS = { 'get' : erequests.async.get, 'put' : erequests.async.put, 'post' : erequests.async.post, 'delete' : erequests.async.delete } class ERequestsClient(object): def __init__(self, verify = None): self.requests_to_send = [] self.results = [] self.verify = verify or False def start_req(self, method, prepared_url, headers, body = ''): method = method.lower() assert method in _METHODS, "Unknown method %s" % method future = _METHODS[method](prepared_url, headers = headers, data = body, verify = self.verify) res = ResultWrapper(self, method, prepared_url) self.requests_to_send.append(future) self.results.append(res) return res def wait_all_requests_completed(self): if len(self.requests_to_send) == 0: return try: for resp, result in zip(erequests.map(self.requests_to_send), self.results): result.response = resp finally: self.requests_to_send = [] self.results = []
normal
{ "blob_id": "4d1157b307d753abea721b93779ccc989c77d8e3", "index": 6876, "step-1": "import erequests\nfrom pyarc.base import RestException\n\n\nclass ResultWrapper(object):\n def __init__(self, client, method, url):\n self.client = client\n self.method = method\n self.url = url\n self.response = None\n\n def get(self):\n if self.response is None:\n self.client.wait_all_requests_completed()\n if self.response.status_code >= 400:\n raise RestException(self.method,\n self.url,\n self.response.status_code,\n self.response.text)\n try:\n return self.response.json()\n except ValueError:\n return self.response.text\n\n\n_METHODS = {\n 'get' : erequests.async.get,\n 'put' : erequests.async.put,\n 'post' : erequests.async.post,\n 'delete' : erequests.async.delete\n }\n\n\nclass ERequestsClient(object):\n def __init__(self, verify = None):\n self.requests_to_send = []\n self.results = []\n self.verify = verify or False\n\n def start_req(self, method, prepared_url, headers, body = ''):\n method = method.lower()\n assert method in _METHODS, \"Unknown method %s\" % method\n\n future = _METHODS[method](prepared_url,\n headers = headers,\n data = body,\n verify = self.verify)\n res = ResultWrapper(self, method, prepared_url)\n self.requests_to_send.append(future)\n self.results.append(res)\n return res\n\n def wait_all_requests_completed(self):\n if len(self.requests_to_send) == 0:\n return\n try:\n for resp, result in zip(erequests.map(self.requests_to_send), self.results):\n result.response = resp\n finally:\n self.requests_to_send = []\n self.results = []\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from utils.gradient_strategy.dct_generator import DCTGenerator from utils.gradient_strategy.random_generator import RandomGenerator from utils.gradient_strategy.upsample_generator import UpSampleGenerator from utils.gradient_strategy.centerconv_generator import CenterConvGenerator from utils.attack_setting import * from utils.construct_model_data import construct_model_and_data from utils.generate_model import ImageModel from utils.generate_video import video from utils.load_data import ImageData, split_data from utils.show_or_save import * from utils.gradient_strategy.centerconv_generator import CenterConvGenerator
normal
{ "blob_id": "399097ef7cfdc061b307c3cc29615c9f50b1e6bf", "index": 5511, "step-1": "<mask token>\n", "step-2": "from utils.gradient_strategy.dct_generator import DCTGenerator\nfrom utils.gradient_strategy.random_generator import RandomGenerator\nfrom utils.gradient_strategy.upsample_generator import UpSampleGenerator\nfrom utils.gradient_strategy.centerconv_generator import CenterConvGenerator\nfrom utils.attack_setting import *\nfrom utils.construct_model_data import construct_model_and_data\nfrom utils.generate_model import ImageModel\nfrom utils.generate_video import video\nfrom utils.load_data import ImageData, split_data\nfrom utils.show_or_save import *\nfrom utils.gradient_strategy.centerconv_generator import CenterConvGenerator\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
class Pwm(): def __init__(self, number, path, features): self.id = number self.path = path + 'pwm' + number self.features = features self.duty = self.get_feature('') self.enable = self.get_feature('_enable') def get_feature(self, feature): return self.features['pwm' + self.id + feature] def set_feature(self, feature, value=0): pass def __str__(self): return 'pwm{}'.format(self.id)
normal
{ "blob_id": "c38aff77a7beebc13e7486150d549b876c830db8", "index": 6104, "step-1": "class Pwm:\n\n def __init__(self, number, path, features):\n self.id = number\n self.path = path + 'pwm' + number\n self.features = features\n self.duty = self.get_feature('')\n self.enable = self.get_feature('_enable')\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "class Pwm:\n\n def __init__(self, number, path, features):\n self.id = number\n self.path = path + 'pwm' + number\n self.features = features\n self.duty = self.get_feature('')\n self.enable = self.get_feature('_enable')\n <mask token>\n <mask token>\n\n def __str__(self):\n return 'pwm{}'.format(self.id)\n", "step-3": "class Pwm:\n\n def __init__(self, number, path, features):\n self.id = number\n self.path = path + 'pwm' + number\n self.features = features\n self.duty = self.get_feature('')\n self.enable = self.get_feature('_enable')\n\n def get_feature(self, feature):\n return self.features['pwm' + self.id + feature]\n <mask token>\n\n def __str__(self):\n return 'pwm{}'.format(self.id)\n", "step-4": "class Pwm:\n\n def __init__(self, number, path, features):\n self.id = number\n self.path = path + 'pwm' + number\n self.features = features\n self.duty = self.get_feature('')\n self.enable = self.get_feature('_enable')\n\n def get_feature(self, feature):\n return self.features['pwm' + self.id + feature]\n\n def set_feature(self, feature, value=0):\n pass\n\n def __str__(self):\n return 'pwm{}'.format(self.id)\n", "step-5": "\nclass Pwm():\n\n\tdef __init__(self, number, path, features):\n\t\tself.id = number\n\t\tself.path = path + 'pwm' + number\n\t\tself.features = features\n\t\tself.duty = self.get_feature('')\n\t\tself.enable = self.get_feature('_enable')\n\n\tdef get_feature(self, feature):\n\t\treturn self.features['pwm' + self.id + feature]\n\n\tdef set_feature(self, feature, value=0):\n\t\tpass\n\n\tdef __str__(self):\n\t\treturn 'pwm{}'.format(self.id)", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from scrapy import cmdline cmdline.execute("scrapy crawl ariz".split())
normal
{ "blob_id": "abb2cfd2113e8de6c7bba42c357f0ec140b224a9", "index": 3311, "step-1": "<mask token>\n", "step-2": "<mask token>\ncmdline.execute('scrapy crawl ariz'.split())\n", "step-3": "from scrapy import cmdline\ncmdline.execute('scrapy crawl ariz'.split())\n", "step-4": "from scrapy import cmdline\ncmdline.execute(\"scrapy crawl ariz\".split())", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import pandas as pd import numpy as np import logging import sklearn from joblib import load import sys import warnings import os if not sys.warnoptions: warnings.simplefilter("ignore") class model: def __init__(self): #from number to labels self.number_to_label = {1 : "Bot",2 : 'DoS attack',3 : 'Brute Force', 5 : 'DDoS attacks',4 : 0} # load the pretrained model try: self.model = load('./decision_tree_model.joblib') self.attack_model = load('./attack_model.joblib') except: # error if model can't be found in the path logging.error("Model can\'t be found in the main directory") logging.error("please fix the problem and restart the server") # load the features for the preprocessing step try: self.all_features = open("./all_features.txt", "r").readline().split(',') self.features = open("./features.txt", "r").read().splitlines() except: # error if features file can't be found in the path logging.error("features.txt can\'t be found in the main directory") logging.error("please fix the problem and restart the server") def preprocess(self,data): #select only the columns that works best with the pretrained model data = data[self.features] #remove infinite and null values data = data.replace([np.inf, -np.inf], np.nan) data = data.dropna() #change the type of the data to float data = data.astype("float") #return the data as numpy array return data.to_numpy() def load_data_csv(self,path = './data_examples/example.csv'): #load and preprocess the csv file self.data = pd.read_csv(path) #for evaluation tasks, we will save the label if ('Label' in self.data.columns): self.label = self.data['Label'].to_numpy() else: self.label = None logging.info('This data is labeled') self.data = self.preprocess(self.data) def load_data(self, rows) : #Load and preprocess strings in csv format self.data =pd.DataFrame([x.strip(',').split(',') for x in rows.strip('bpoint').split('bpoint')],columns = self.all_features) self.data = self.preprocess(self.data) def predict(self): results = [] #predict 
the class of the flow self.prediction = self.model.predict(self.data).astype('int32') #in case of one row prediction if (self.prediction.shape[0] == 1 ): if (self.prediction.item() == 1): results.append(self.number_to_label[self.attack_model.predict(self.data[0,:].reshape(1, -1)).item()]) else: results.append(0) else: for i in range(self.prediction.shape[0]): if (self.prediction[i] == 1): results.append(self.number_to_label[self.attack_model.predict(self.data[i,:].reshape(1, -1)).item()]) else: results.append(0) return results def accuracy(self): #calculate accuracy in case of label availaiblity if (self.label is None): logging.error("Score can't be calculated, No label provided") logging.error("be sure to name your label column with 'Lebel'") return None else: from sklearn.metrics import accuracy_score accuracy = accuracy_score(self.label, self.prediction) return accuracy """ m = model() m.load_data(sys.argv[1]) prediction = m.predict() """
normal
{ "blob_id": "c0f3a957613a4f4e04aeb3eb2e3fa4053bd0122c", "index": 8438, "step-1": "<mask token>\n\n\nclass model:\n\n def __init__(self):\n self.number_to_label = {(1): 'Bot', (2): 'DoS attack', (3):\n 'Brute Force', (5): 'DDoS attacks', (4): 0}\n try:\n self.model = load('./decision_tree_model.joblib')\n self.attack_model = load('./attack_model.joblib')\n except:\n logging.error(\"Model can't be found in the main directory\")\n logging.error('please fix the problem and restart the server')\n try:\n self.all_features = open('./all_features.txt', 'r').readline(\n ).split(',')\n self.features = open('./features.txt', 'r').read().splitlines()\n except:\n logging.error(\"features.txt can't be found in the main directory\")\n logging.error('please fix the problem and restart the server')\n\n def preprocess(self, data):\n data = data[self.features]\n data = data.replace([np.inf, -np.inf], np.nan)\n data = data.dropna()\n data = data.astype('float')\n return data.to_numpy()\n\n def load_data_csv(self, path='./data_examples/example.csv'):\n self.data = pd.read_csv(path)\n if 'Label' in self.data.columns:\n self.label = self.data['Label'].to_numpy()\n else:\n self.label = None\n logging.info('This data is labeled')\n self.data = self.preprocess(self.data)\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass model:\n\n def __init__(self):\n self.number_to_label = {(1): 'Bot', (2): 'DoS attack', (3):\n 'Brute Force', (5): 'DDoS attacks', (4): 0}\n try:\n self.model = load('./decision_tree_model.joblib')\n self.attack_model = load('./attack_model.joblib')\n except:\n logging.error(\"Model can't be found in the main directory\")\n logging.error('please fix the problem and restart the server')\n try:\n self.all_features = open('./all_features.txt', 'r').readline(\n ).split(',')\n self.features = open('./features.txt', 'r').read().splitlines()\n except:\n logging.error(\"features.txt can't be found in the main directory\")\n 
logging.error('please fix the problem and restart the server')\n\n def preprocess(self, data):\n data = data[self.features]\n data = data.replace([np.inf, -np.inf], np.nan)\n data = data.dropna()\n data = data.astype('float')\n return data.to_numpy()\n\n def load_data_csv(self, path='./data_examples/example.csv'):\n self.data = pd.read_csv(path)\n if 'Label' in self.data.columns:\n self.label = self.data['Label'].to_numpy()\n else:\n self.label = None\n logging.info('This data is labeled')\n self.data = self.preprocess(self.data)\n <mask token>\n\n def predict(self):\n results = []\n self.prediction = self.model.predict(self.data).astype('int32')\n if self.prediction.shape[0] == 1:\n if self.prediction.item() == 1:\n results.append(self.number_to_label[self.attack_model.\n predict(self.data[0, :].reshape(1, -1)).item()])\n else:\n results.append(0)\n else:\n for i in range(self.prediction.shape[0]):\n if self.prediction[i] == 1:\n results.append(self.number_to_label[self.attack_model.\n predict(self.data[i, :].reshape(1, -1)).item()])\n else:\n results.append(0)\n return results\n <mask token>\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass model:\n\n def __init__(self):\n self.number_to_label = {(1): 'Bot', (2): 'DoS attack', (3):\n 'Brute Force', (5): 'DDoS attacks', (4): 0}\n try:\n self.model = load('./decision_tree_model.joblib')\n self.attack_model = load('./attack_model.joblib')\n except:\n logging.error(\"Model can't be found in the main directory\")\n logging.error('please fix the problem and restart the server')\n try:\n self.all_features = open('./all_features.txt', 'r').readline(\n ).split(',')\n self.features = open('./features.txt', 'r').read().splitlines()\n except:\n logging.error(\"features.txt can't be found in the main directory\")\n logging.error('please fix the problem and restart the server')\n\n def preprocess(self, data):\n data = data[self.features]\n data = data.replace([np.inf, -np.inf], np.nan)\n data = data.dropna()\n data = 
data.astype('float')\n return data.to_numpy()\n\n def load_data_csv(self, path='./data_examples/example.csv'):\n self.data = pd.read_csv(path)\n if 'Label' in self.data.columns:\n self.label = self.data['Label'].to_numpy()\n else:\n self.label = None\n logging.info('This data is labeled')\n self.data = self.preprocess(self.data)\n <mask token>\n\n def predict(self):\n results = []\n self.prediction = self.model.predict(self.data).astype('int32')\n if self.prediction.shape[0] == 1:\n if self.prediction.item() == 1:\n results.append(self.number_to_label[self.attack_model.\n predict(self.data[0, :].reshape(1, -1)).item()])\n else:\n results.append(0)\n else:\n for i in range(self.prediction.shape[0]):\n if self.prediction[i] == 1:\n results.append(self.number_to_label[self.attack_model.\n predict(self.data[i, :].reshape(1, -1)).item()])\n else:\n results.append(0)\n return results\n\n def accuracy(self):\n if self.label is None:\n logging.error(\"Score can't be calculated, No label provided\")\n logging.error(\"be sure to name your label column with 'Lebel'\")\n return None\n else:\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(self.label, self.prediction)\n return accuracy\n\n\n<mask token>\n", "step-4": "<mask token>\nif not sys.warnoptions:\n warnings.simplefilter('ignore')\n\n\nclass model:\n\n def __init__(self):\n self.number_to_label = {(1): 'Bot', (2): 'DoS attack', (3):\n 'Brute Force', (5): 'DDoS attacks', (4): 0}\n try:\n self.model = load('./decision_tree_model.joblib')\n self.attack_model = load('./attack_model.joblib')\n except:\n logging.error(\"Model can't be found in the main directory\")\n logging.error('please fix the problem and restart the server')\n try:\n self.all_features = open('./all_features.txt', 'r').readline(\n ).split(',')\n self.features = open('./features.txt', 'r').read().splitlines()\n except:\n logging.error(\"features.txt can't be found in the main directory\")\n logging.error('please fix the problem and 
restart the server')\n\n def preprocess(self, data):\n data = data[self.features]\n data = data.replace([np.inf, -np.inf], np.nan)\n data = data.dropna()\n data = data.astype('float')\n return data.to_numpy()\n\n def load_data_csv(self, path='./data_examples/example.csv'):\n self.data = pd.read_csv(path)\n if 'Label' in self.data.columns:\n self.label = self.data['Label'].to_numpy()\n else:\n self.label = None\n logging.info('This data is labeled')\n self.data = self.preprocess(self.data)\n\n def load_data(self, rows):\n self.data = pd.DataFrame([x.strip(',').split(',') for x in rows.\n strip('bpoint').split('bpoint')], columns=self.all_features)\n self.data = self.preprocess(self.data)\n\n def predict(self):\n results = []\n self.prediction = self.model.predict(self.data).astype('int32')\n if self.prediction.shape[0] == 1:\n if self.prediction.item() == 1:\n results.append(self.number_to_label[self.attack_model.\n predict(self.data[0, :].reshape(1, -1)).item()])\n else:\n results.append(0)\n else:\n for i in range(self.prediction.shape[0]):\n if self.prediction[i] == 1:\n results.append(self.number_to_label[self.attack_model.\n predict(self.data[i, :].reshape(1, -1)).item()])\n else:\n results.append(0)\n return results\n\n def accuracy(self):\n if self.label is None:\n logging.error(\"Score can't be calculated, No label provided\")\n logging.error(\"be sure to name your label column with 'Lebel'\")\n return None\n else:\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(self.label, self.prediction)\n return accuracy\n\n\n<mask token>\n", "step-5": "import pandas as pd\nimport numpy as np\nimport logging\nimport sklearn\nfrom joblib import load\nimport sys\nimport warnings\nimport os\n\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n\nclass model:\n def __init__(self):\n #from number to labels\n self.number_to_label = {1 : \"Bot\",2 : 'DoS attack',3 : 'Brute Force', 5 : 'DDoS attacks',4 : 0}\n # load the pretrained model \n 
try:\n self.model = load('./decision_tree_model.joblib')\n self.attack_model = load('./attack_model.joblib')\n except:\n # error if model can't be found in the path\n logging.error(\"Model can\\'t be found in the main directory\")\n logging.error(\"please fix the problem and restart the server\")\n\n # load the features for the preprocessing step\n try:\n self.all_features = open(\"./all_features.txt\", \"r\").readline().split(',')\n self.features = open(\"./features.txt\", \"r\").read().splitlines()\n except:\n # error if features file can't be found in the path\n logging.error(\"features.txt can\\'t be found in the main directory\")\n logging.error(\"please fix the problem and restart the server\")\n\n def preprocess(self,data):\n #select only the columns that works best with the pretrained model\n data = data[self.features]\n #remove infinite and null values\n data = data.replace([np.inf, -np.inf], np.nan)\n data = data.dropna()\n #change the type of the data to float\n data = data.astype(\"float\")\n #return the data as numpy array\n return data.to_numpy()\n\n def load_data_csv(self,path = './data_examples/example.csv'):\n #load and preprocess the csv file\n self.data = pd.read_csv(path)\n #for evaluation tasks, we will save the label\n if ('Label' in self.data.columns):\n self.label = self.data['Label'].to_numpy()\n else:\n self.label = None\n logging.info('This data is labeled')\n\n self.data = self.preprocess(self.data)\n\n def load_data(self, rows) :\n #Load and preprocess strings in csv format \n self.data =pd.DataFrame([x.strip(',').split(',') for x in rows.strip('bpoint').split('bpoint')],columns = self.all_features)\n self.data = self.preprocess(self.data)\n\n def predict(self):\n results = []\n #predict the class of the flow\n self.prediction = self.model.predict(self.data).astype('int32')\n #in case of one row prediction\n if (self.prediction.shape[0] == 1 ):\n if (self.prediction.item() == 1):\n 
results.append(self.number_to_label[self.attack_model.predict(self.data[0,:].reshape(1, -1)).item()])\n else:\n results.append(0)\n \n else:\n for i in range(self.prediction.shape[0]):\n if (self.prediction[i] == 1):\n results.append(self.number_to_label[self.attack_model.predict(self.data[i,:].reshape(1, -1)).item()])\n else:\n results.append(0)\n return results\n \n def accuracy(self):\n #calculate accuracy in case of label availaiblity\n if (self.label is None):\n logging.error(\"Score can't be calculated, No label provided\")\n logging.error(\"be sure to name your label column with 'Lebel'\")\n return None\n else:\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(self.label, self.prediction)\n return accuracy\n\"\"\"\nm = model()\nm.load_data(sys.argv[1])\nprediction = m.predict()\n\"\"\"\n\n\n", "step-ids": [ 4, 5, 6, 8, 10 ] }
[ 4, 5, 6, 8, 10 ]
import numpy as np from math import ceil, log2 def avg(list): return np.mean(list) def dispersion(list): res = 0 for i in list: res += (i - np.mean(list)) ** 2 return res / len(list) def variation_coefficient(list): return (dispersion(list) ** (1/2) / np.mean(list)) * 100 def chi_square(list): b = sorted(list) k = ceil(log2(len(list)) + 1) step = 10000 / k p = 1 / k frequency_vector = [] for i in range(k): counter = 0 for j in b: if (j > i * step) and (j <= (i + 1) * step): counter += 1 else: continue frequency_vector.append(counter) chi = 0 for i in range(k): chi += ((frequency_vector[i] - p * len(list)) ** 2) / (p * len(list)) return 0.8 <= chi <= 16.8
normal
{ "blob_id": "f2b978b9a4c00469cdd2f5e1e9275df73c7379b8", "index": 3904, "step-1": "<mask token>\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\n<mask token>\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n", "step-2": "<mask token>\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\n<mask token>\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n", "step-3": "<mask token>\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\ndef variation_coefficient(list):\n return dispersion(list) ** (1 / 2) / np.mean(list) * 100\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n", "step-4": 
"import numpy as np\nfrom math import ceil, log2\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\ndef variation_coefficient(list):\n return dispersion(list) ** (1 / 2) / np.mean(list) * 100\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n frequency_vector = []\n for i in range(k):\n counter = 0\n for j in b:\n if j > i * step and j <= (i + 1) * step:\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += (frequency_vector[i] - p * len(list)) ** 2 / (p * len(list))\n return 0.8 <= chi <= 16.8\n", "step-5": "import numpy as np\nfrom math import ceil, log2\n\n\ndef avg(list):\n return np.mean(list)\n\n\ndef dispersion(list):\n res = 0\n for i in list:\n res += (i - np.mean(list)) ** 2\n return res / len(list)\n\n\ndef variation_coefficient(list):\n return (dispersion(list) ** (1/2) / np.mean(list)) * 100\n\n\ndef chi_square(list):\n b = sorted(list)\n k = ceil(log2(len(list)) + 1)\n step = 10000 / k\n p = 1 / k\n\n frequency_vector = []\n\n for i in range(k):\n counter = 0\n for j in b:\n if (j > i * step) and (j <= (i + 1) * step):\n counter += 1\n else:\n continue\n frequency_vector.append(counter)\n chi = 0\n for i in range(k):\n chi += ((frequency_vector[i] - p * len(list)) ** 2) / (p * len(list))\n\n return 0.8 <= chi <= 16.8\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import ipaddress import subprocess from subprocess import Popen, PIPE import time ip_net = ipaddress.ip_network('192.168.0.100/30') for i in ip_net.hosts(): # print(i) host_add = str(i) toping = subprocess.Popen(['ping', '-n', '3',host_add],stdout=PIPE) output = toping.communicate()[0] hostalive = toping.returncode if hostalive == 0: print(host_add,"is reachable") else: print(host_add,"is not reachable") # print(output) # time.sleep(3) # if toping ==0: # print(i, ' is alive') # else: # print(i,' is not alive')
normal
{ "blob_id": "414fb437783fcfb55f542f072aaf3a8bb02b441e", "index": 8275, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in ip_net.hosts():\n host_add = str(i)\n toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)\n output = toping.communicate()[0]\n hostalive = toping.returncode\n if hostalive == 0:\n print(host_add, 'is reachable')\n else:\n print(host_add, 'is not reachable')\n", "step-3": "<mask token>\nip_net = ipaddress.ip_network('192.168.0.100/30')\nfor i in ip_net.hosts():\n host_add = str(i)\n toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)\n output = toping.communicate()[0]\n hostalive = toping.returncode\n if hostalive == 0:\n print(host_add, 'is reachable')\n else:\n print(host_add, 'is not reachable')\n", "step-4": "import ipaddress\nimport subprocess\nfrom subprocess import Popen, PIPE\nimport time\nip_net = ipaddress.ip_network('192.168.0.100/30')\nfor i in ip_net.hosts():\n host_add = str(i)\n toping = subprocess.Popen(['ping', '-n', '3', host_add], stdout=PIPE)\n output = toping.communicate()[0]\n hostalive = toping.returncode\n if hostalive == 0:\n print(host_add, 'is reachable')\n else:\n print(host_add, 'is not reachable')\n", "step-5": "import ipaddress\r\nimport subprocess\r\nfrom subprocess import Popen, PIPE\r\nimport time\r\n\r\nip_net = ipaddress.ip_network('192.168.0.100/30')\r\nfor i in ip_net.hosts():\r\n # print(i)\r\n host_add = str(i)\r\n toping = subprocess.Popen(['ping', '-n', '3',host_add],stdout=PIPE)\r\n\r\n output = toping.communicate()[0]\r\n hostalive = toping.returncode\r\n if hostalive == 0:\r\n print(host_add,\"is reachable\")\r\n else:\r\n print(host_add,\"is not reachable\")\r\n # print(output)\r\n # time.sleep(3)\r\n # if toping ==0:\r\n # print(i, ' is alive')\r\n # else:\r\n # print(i,' is not alive')\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
for i in range(0, 20): if i % 20 == 0: print('Stop It') else: print('The For Loop Failed')
normal
{ "blob_id": "bfb2d7b811fd450b53493375fa130649349d308f", "index": 174, "step-1": "<mask token>\n", "step-2": "for i in range(0, 20):\n if i % 20 == 0:\n print('Stop It')\nelse:\n print('The For Loop Failed')\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
# !/usr/bin/env python # coding: utf-8 __author__ = 'zhouhenglc' TIME_FORMAT = '%Y-%m-%d %H:%M:%S' ENCODING = 'utf-8' # exam mode # G_SELECT_MODE # 待废弃,逐步完善使用classes.objects.question_type # G_SELECT_MODE = ["无", "选择题", "名词解释", "简答题", "计算题", "论述题", "多选题", "判断题"] G_MULTI_MODE = [6, ] # 多选题型 多选题=6 # G_DEF_OPTIONS = [1, 6] # 自定义选项 单选题=1 多选题=6 # exam status STATUS_ONLINE = 64 STATUS_OFFLINE = 128 # token error TOKEN_BAD_FORMAT = 'token_bad_format' # login again TOKEN_EXPIRED = 'token_expired' # try refresh TOKEN_NOT_STORAGE = 'token_not_storage' # login again TOKEN_REQUIRE_REFRESH = 'token_require_refresh' # try refresh # training question state T_STATE_RIGHT = 'right' T_STATE_WRONG = 'wrong' T_STATE_SKIP = 'skip' T_STATES = [T_STATE_RIGHT, T_STATE_WRONG, T_STATE_SKIP] # resource constants R_EXAM = 'exam' R_QUESTION = 'question' R_VC = 'virtual_currency' R_SE = 'security' # resource event E_AFTER_UPDATE = 'after_update' E_GEN_TOKEN = 'gen_token' E_PARSING_TOKEN = 'parsing_token' E_NEW_BILLING = 'new_billing' E_SE_FIREWALL = 'security_firewall' # vc billing VB_FB = 'feedback_exam' VB_FB_NAME = '题库问题反馈得积分' VC_EC_EM = 'vc_exchange_exam_mem' VC_EC_EM_NAME = '积分换题库会员' # security handle action SE_ACTION_NORMAL = 'normal' SE_ACTION_WARN = 'warn' SE_ACTION_EXIT = 'exit' # DATA_REGISTRY keys DR_KEY_VC_GOODS = 'vc_goods' DR_KEY_ROUTES = 'routes' # goods type GOOD_TYPE_EXAM = 'exam'
normal
{ "blob_id": "4605a3f88c73b43fa7611a10a400ad2d4d7c6dfc", "index": 2273, "step-1": "<mask token>\n", "step-2": "__author__ = 'zhouhenglc'\nTIME_FORMAT = '%Y-%m-%d %H:%M:%S'\nENCODING = 'utf-8'\nG_MULTI_MODE = [6]\nSTATUS_ONLINE = 64\nSTATUS_OFFLINE = 128\nTOKEN_BAD_FORMAT = 'token_bad_format'\nTOKEN_EXPIRED = 'token_expired'\nTOKEN_NOT_STORAGE = 'token_not_storage'\nTOKEN_REQUIRE_REFRESH = 'token_require_refresh'\nT_STATE_RIGHT = 'right'\nT_STATE_WRONG = 'wrong'\nT_STATE_SKIP = 'skip'\nT_STATES = [T_STATE_RIGHT, T_STATE_WRONG, T_STATE_SKIP]\nR_EXAM = 'exam'\nR_QUESTION = 'question'\nR_VC = 'virtual_currency'\nR_SE = 'security'\nE_AFTER_UPDATE = 'after_update'\nE_GEN_TOKEN = 'gen_token'\nE_PARSING_TOKEN = 'parsing_token'\nE_NEW_BILLING = 'new_billing'\nE_SE_FIREWALL = 'security_firewall'\nVB_FB = 'feedback_exam'\nVB_FB_NAME = '题库问题反馈得积分'\nVC_EC_EM = 'vc_exchange_exam_mem'\nVC_EC_EM_NAME = '积分换题库会员'\nSE_ACTION_NORMAL = 'normal'\nSE_ACTION_WARN = 'warn'\nSE_ACTION_EXIT = 'exit'\nDR_KEY_VC_GOODS = 'vc_goods'\nDR_KEY_ROUTES = 'routes'\nGOOD_TYPE_EXAM = 'exam'\n", "step-3": "# !/usr/bin/env python\n# coding: utf-8\n\n\n__author__ = 'zhouhenglc'\n\n\nTIME_FORMAT = '%Y-%m-%d %H:%M:%S'\nENCODING = 'utf-8'\n\n\n# exam mode\n# G_SELECT_MODE\n# 待废弃,逐步完善使用classes.objects.question_type\n# G_SELECT_MODE = [\"无\", \"选择题\", \"名词解释\", \"简答题\", \"计算题\", \"论述题\", \"多选题\", \"判断题\"]\nG_MULTI_MODE = [6, ] # 多选题型 多选题=6\n# G_DEF_OPTIONS = [1, 6] # 自定义选项 单选题=1 多选题=6\n\n# exam status\nSTATUS_ONLINE = 64\nSTATUS_OFFLINE = 128\n\n# token error\nTOKEN_BAD_FORMAT = 'token_bad_format' # login again\nTOKEN_EXPIRED = 'token_expired' # try refresh\nTOKEN_NOT_STORAGE = 'token_not_storage' # login again\nTOKEN_REQUIRE_REFRESH = 'token_require_refresh' # try refresh\n\n\n# training question state\nT_STATE_RIGHT = 'right'\nT_STATE_WRONG = 'wrong'\nT_STATE_SKIP = 'skip'\nT_STATES = [T_STATE_RIGHT, T_STATE_WRONG, T_STATE_SKIP]\n\n\n# resource constants\nR_EXAM = 'exam'\nR_QUESTION = 'question'\nR_VC = 
'virtual_currency'\nR_SE = 'security'\n\n\n# resource event\nE_AFTER_UPDATE = 'after_update'\nE_GEN_TOKEN = 'gen_token'\nE_PARSING_TOKEN = 'parsing_token'\nE_NEW_BILLING = 'new_billing'\nE_SE_FIREWALL = 'security_firewall'\n\n\n# vc billing\nVB_FB = 'feedback_exam'\nVB_FB_NAME = '题库问题反馈得积分'\nVC_EC_EM = 'vc_exchange_exam_mem'\nVC_EC_EM_NAME = '积分换题库会员'\n\n\n# security handle action\nSE_ACTION_NORMAL = 'normal'\nSE_ACTION_WARN = 'warn'\nSE_ACTION_EXIT = 'exit'\n\n\n# DATA_REGISTRY keys\nDR_KEY_VC_GOODS = 'vc_goods'\nDR_KEY_ROUTES = 'routes'\n\n\n# goods type\nGOOD_TYPE_EXAM = 'exam'\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
"""Some random mathematical helper functions. """ from __future__ import division, print_function import math # STATISTICS def mean(L): """Calculate mean of given List""" return sum(L) / len(L) def variance(L, is_sample=0): """calculate variance (or sample variance) of given List""" m = mean(L) return sum((x-m)**2 for x in L) / (len(L) - is_sample) def std_dev(L, is_sample=0): """calculate standard deviation of given List""" return math.sqrt(variance(L, is_sample)) def z_score(num, mean, std_dev): """calculate z-score given sample size, mean and standard deviation""" return (num - mean) / std_dev # COMBINATORICS def fac(n): assert n >= 0 return n if n <= 2 else fac(n - 1) * n def over(n, k): """n over k""" return fac(n) // fac(n-k) def coin(coins, heads): """Probability for given number of heads (or tails) when throwing given number of fair coins.""" return Fraction(int(fac(c) / fac(c-n) / fac(n)), 2**c) def pick_grom_group(group, other, selected): """When selecting 'selected' number of individuums from 'group' and 'other', return probability that all are from 'group'.""" return Faction(over(group, selected), over(group + other, selected)) def unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed): """Calculate probability for pulling a coin from a bag with fair and unfair coins and flipping it a number of times, each time coming up heads.""" part_fair = (num_coins - num_unfair) / num_coins part_unfair = num_unfair / num_coins prob_fair = 0.5**heads_needed prob_unfair = (percent_unfair / 100)**heads_needed return part_fair * prob_fair + part_unfair * prob_unfair # GEOMETRY def herons_formula(a, b, c): """Calculate area of triangle with sides a, b, and c.""" print("sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2") s = (a + b + c) / 2 return math.sqrt(s * (s-a) * (s-b) * (s-c)) def area_equilat(side): """Area of equilateral triangle.""" return side/2 * math.sqrt(side**2 - (side/2)**2) # LINEAR ALGEBRA def inv(a,b,c,d): """Inverse of 2x2 matrix.""" det = 
a*d-b*c m = lambda x: fractions.Fraction(x, det) return map(str, map(m, [d, -b, -c, a])) def det2(m): """Determinant of 2x2 matrix.""" (a,b), (c,d) = m return a*d - b*c def det3(m): """Determinant of 3x3 matrix.""" a, b, c = m[0] da = det2([ m[1][1:] , m[2][1:]]) db = det2([[m[1][0],m[1][2]],[m[2][0],m[2][2]]]) dc = det2([ m[1][:2] , m[2][:2]]) return a*da - b*db + c*dc # SOME COMPLEX FORMULAS I NEVER CAN QUITE REMEMBER def series(r, n): """Calculate geometric series.""" return (1 - r**n) / (1 - r) def quad_form(a, b, c): """Quadratic Formula: calculate values of x so that ax^2+bx+c=0.""" sq = math.sqrt(b**2 - 4 * a * c) x1 = (-b - sq) / (2 * a) x2 = (-b + sq) / (2 * a) return (x1, x2) def master_method(a, b, d): """Estimate Complexity using Master Method, print result.""" if a == b**d: print("Case 1: a = b^d") print("-> O(n^%d log n)" % d) elif a < b**d: print("Case 2: a < b^d") print("-> O(n^%d)" % d) elif a > b**d: print("Case 3: a > b^d") print("-> O(n^log%d(%d))" % (b, a)) print(" = O(n^%.2f)" % math.log(a, b))
normal
{ "blob_id": "34acb6da1dc9403a311ce3bca0a828a77b7b36da", "index": 7403, "step-1": "<mask token>\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\n<mask token>\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - 
k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\n<mask token>\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\n<mask token>\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n", "step-3": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n 
\"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\n<mask token>\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\n<mask token>\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\ndef det3(m):\n \"\"\"Determinant of 3x3 matrix.\"\"\"\n a, b, c = m[0]\n da = det2([m[1][1:], m[2][1:]])\n db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]])\n dc = det2([m[1][:2], m[2][:2]])\n return a * da - b * db + c * dc\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n 
if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n", "step-4": "<mask token>\n\n\ndef mean(L):\n \"\"\"Calculate mean of given List\"\"\"\n return sum(L) / len(L)\n\n\ndef variance(L, is_sample=0):\n \"\"\"calculate variance (or sample variance) of given List\"\"\"\n m = mean(L)\n return sum((x - m) ** 2 for x in L) / (len(L) - is_sample)\n\n\ndef std_dev(L, is_sample=0):\n \"\"\"calculate standard deviation of given List\"\"\"\n return math.sqrt(variance(L, is_sample))\n\n\ndef z_score(num, mean, std_dev):\n \"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n return (num - mean) / std_dev\n\n\ndef fac(n):\n assert n >= 0\n return n if n <= 2 else fac(n - 1) * n\n\n\ndef over(n, k):\n \"\"\"n over k\"\"\"\n return fac(n) // fac(n - k)\n\n\ndef coin(coins, heads):\n \"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n return Fraction(int(fac(c) / fac(c - n) / fac(n)), 2 ** c)\n\n\ndef pick_grom_group(group, other, selected):\n \"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n return Faction(over(group, selected), over(group + other, selected))\n\n\ndef unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):\n \"\"\"Calculate probability for pulling a coin from a bag with fair and unfair\n\tcoins and flipping it a number of times, each time coming up heads.\"\"\"\n part_fair = (num_coins - num_unfair) / num_coins\n part_unfair = num_unfair / num_coins\n prob_fair = 0.5 ** heads_needed\n prob_unfair = (percent_unfair / 100) ** heads_needed\n return part_fair * prob_fair + part_unfair * prob_unfair\n\n\ndef herons_formula(a, b, c):\n \"\"\"Calculate area of triangle with 
sides a, b, and c.\"\"\"\n print('sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2')\n s = (a + b + c) / 2\n return math.sqrt(s * (s - a) * (s - b) * (s - c))\n\n\ndef area_equilat(side):\n \"\"\"Area of equilateral triangle.\"\"\"\n return side / 2 * math.sqrt(side ** 2 - (side / 2) ** 2)\n\n\ndef inv(a, b, c, d):\n \"\"\"Inverse of 2x2 matrix.\"\"\"\n det = a * d - b * c\n m = lambda x: fractions.Fraction(x, det)\n return map(str, map(m, [d, -b, -c, a]))\n\n\ndef det2(m):\n \"\"\"Determinant of 2x2 matrix.\"\"\"\n (a, b), (c, d) = m\n return a * d - b * c\n\n\ndef det3(m):\n \"\"\"Determinant of 3x3 matrix.\"\"\"\n a, b, c = m[0]\n da = det2([m[1][1:], m[2][1:]])\n db = det2([[m[1][0], m[1][2]], [m[2][0], m[2][2]]])\n dc = det2([m[1][:2], m[2][:2]])\n return a * da - b * db + c * dc\n\n\ndef series(r, n):\n \"\"\"Calculate geometric series.\"\"\"\n return (1 - r ** n) / (1 - r)\n\n\ndef quad_form(a, b, c):\n \"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n sq = math.sqrt(b ** 2 - 4 * a * c)\n x1 = (-b - sq) / (2 * a)\n x2 = (-b + sq) / (2 * a)\n return x1, x2\n\n\ndef master_method(a, b, d):\n \"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n if a == b ** d:\n print('Case 1: a = b^d')\n print('-> O(n^%d log n)' % d)\n elif a < b ** d:\n print('Case 2: a < b^d')\n print('-> O(n^%d)' % d)\n elif a > b ** d:\n print('Case 3: a > b^d')\n print('-> O(n^log%d(%d))' % (b, a))\n print(' = O(n^%.2f)' % math.log(a, b))\n", "step-5": "\"\"\"Some random mathematical helper functions.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport math\n\n\n# STATISTICS\n\ndef mean(L):\n\t\"\"\"Calculate mean of given List\"\"\"\n\treturn sum(L) / len(L)\n\t\ndef variance(L, is_sample=0):\n\t\"\"\"calculate variance (or sample variance) of given List\"\"\"\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)\n\t\ndef std_dev(L, is_sample=0):\n\t\"\"\"calculate standard deviation of given List\"\"\"\n\treturn 
math.sqrt(variance(L, is_sample))\n\ndef z_score(num, mean, std_dev):\n\t\"\"\"calculate z-score given sample size, mean and standard deviation\"\"\"\n\treturn (num - mean) / std_dev\n\n\n# COMBINATORICS\n\ndef fac(n):\n\tassert n >= 0\n\treturn n if n <= 2 else fac(n - 1) * n\n\ndef over(n, k):\n\t\"\"\"n over k\"\"\"\n\treturn fac(n) // fac(n-k)\n\ndef coin(coins, heads):\n\t\"\"\"Probability for given number of heads (or tails) when throwing given\n\tnumber of fair coins.\"\"\"\n\treturn Fraction(int(fac(c) / fac(c-n) / fac(n)), 2**c)\n\ndef pick_grom_group(group, other, selected):\n\t\"\"\"When selecting 'selected' number of individuums from 'group' and 'other',\n\treturn probability that all are from 'group'.\"\"\"\n\treturn Faction(over(group, selected), over(group + other, selected))\n\ndef unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):\n\t\"\"\"Calculate probability for pulling a coin from a bag with fair and unfair\n\tcoins and flipping it a number of times, each time coming up heads.\"\"\"\n\tpart_fair = (num_coins - num_unfair) / num_coins\n\tpart_unfair = num_unfair / num_coins\n\tprob_fair = 0.5**heads_needed\n\tprob_unfair = (percent_unfair / 100)**heads_needed\n\treturn part_fair * prob_fair + part_unfair * prob_unfair\n\n\n# GEOMETRY\n\ndef herons_formula(a, b, c):\n\t\"\"\"Calculate area of triangle with sides a, b, and c.\"\"\"\n\tprint(\"sqrt(s*(s-a)*(s-b)*(s-c)) with s = (a + b + c)/2\")\n\ts = (a + b + c) / 2\n\treturn math.sqrt(s * (s-a) * (s-b) * (s-c))\n\t\ndef area_equilat(side):\n\t\"\"\"Area of equilateral triangle.\"\"\"\n\treturn side/2 * math.sqrt(side**2 - (side/2)**2)\n\n\n# LINEAR ALGEBRA\n\ndef inv(a,b,c,d):\n\t\"\"\"Inverse of 2x2 matrix.\"\"\"\n\tdet = a*d-b*c\n\tm = lambda x: fractions.Fraction(x, det)\n\treturn map(str, map(m, [d, -b, -c, a]))\n\ndef det2(m):\n\t\"\"\"Determinant of 2x2 matrix.\"\"\"\n\t(a,b), (c,d) = m\n\treturn a*d - b*c\n\ndef det3(m):\n\t\"\"\"Determinant of 3x3 matrix.\"\"\"\n\ta, b, c 
= m[0]\n\tda = det2([ m[1][1:] , m[2][1:]])\n\tdb = det2([[m[1][0],m[1][2]],[m[2][0],m[2][2]]])\n\tdc = det2([ m[1][:2] , m[2][:2]])\n\treturn a*da - b*db + c*dc\n\n\n# SOME COMPLEX FORMULAS I NEVER CAN QUITE REMEMBER\n\ndef series(r, n):\n\t\"\"\"Calculate geometric series.\"\"\"\n\treturn (1 - r**n) / (1 - r)\n\ndef quad_form(a, b, c):\n\t\"\"\"Quadratic Formula: calculate values of x so that ax^2+bx+c=0.\"\"\"\n\tsq = math.sqrt(b**2 - 4 * a * c)\n\tx1 = (-b - sq) / (2 * a)\n\tx2 = (-b + sq) / (2 * a)\n\treturn (x1, x2)\n\ndef master_method(a, b, d):\n\t\"\"\"Estimate Complexity using Master Method, print result.\"\"\"\n\tif a == b**d:\n\t\tprint(\"Case 1: a = b^d\")\n\t\tprint(\"-> O(n^%d log n)\" % d)\n\telif a < b**d:\n\t\tprint(\"Case 2: a < b^d\")\n\t\tprint(\"-> O(n^%d)\" % d)\n\telif a > b**d:\n\t\tprint(\"Case 3: a > b^d\")\n\t\tprint(\"-> O(n^log%d(%d))\" % (b, a))\n\t\tprint(\" = O(n^%.2f)\" % math.log(a, b))\n\n", "step-ids": [ 7, 14, 15, 17, 19 ] }
[ 7, 14, 15, 17, 19 ]
# help from https://stackoverflow.com/questions/19007383/compare-two-different-files-line-by-line-in-python with open('Book1.txt', 'r') as file1: with open('20k.txt', 'r') as file2: same = set(file1).intersection(file2) same.discard('\n') with open('notin20kforBook1.txt', 'w') as file_out: for line in same: file_out.write(line) with open('Book2.txt', 'r') as file3: with open('20k.txt', 'r') as file2: same = set(file3).intersection(file2) same.discard('\n') with open('notin20kforBook2.txt', 'w') as file_out: for line in same: file_out.write(line) with open('Book3.txt', 'r') as file4: with open('20k.txt', 'r') as file2: same = set(file4).intersection(file2) same.discard('\n') with open('notin20kforBook3.txt', 'w') as file_out: for line in same: file_out.write(line)
normal
{ "blob_id": "21a41356fcedb36223498db0fe783e4a9e8e1ba6", "index": 210, "step-1": "<mask token>\n", "step-2": "with open('Book1.txt', 'r') as file1:\n with open('20k.txt', 'r') as file2:\n same = set(file1).intersection(file2)\nsame.discard('\\n')\nwith open('notin20kforBook1.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\nwith open('Book2.txt', 'r') as file3:\n with open('20k.txt', 'r') as file2:\n same = set(file3).intersection(file2)\nsame.discard('\\n')\nwith open('notin20kforBook2.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\nwith open('Book3.txt', 'r') as file4:\n with open('20k.txt', 'r') as file2:\n same = set(file4).intersection(file2)\nsame.discard('\\n')\nwith open('notin20kforBook3.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n", "step-3": "# help from https://stackoverflow.com/questions/19007383/compare-two-different-files-line-by-line-in-python\n\nwith open('Book1.txt', 'r') as file1:\n with open('20k.txt', 'r') as file2:\n same = set(file1).intersection(file2)\n\nsame.discard('\\n')\n\nwith open('notin20kforBook1.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n\nwith open('Book2.txt', 'r') as file3:\n with open('20k.txt', 'r') as file2:\n same = set(file3).intersection(file2)\n\nsame.discard('\\n')\n\nwith open('notin20kforBook2.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n\nwith open('Book3.txt', 'r') as file4:\n with open('20k.txt', 'r') as file2:\n same = set(file4).intersection(file2)\n\nsame.discard('\\n')\n\nwith open('notin20kforBook3.txt', 'w') as file_out:\n for line in same:\n file_out.write(line)\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from typing import List from uuid import uuid4 from fastapi import APIRouter, Depends, FastAPI, File, UploadFile from sqlalchemy.orm import Session from starlette.requests import Request from Scripts.fastapp.common.consts import UPLOAD_DIRECTORY from Scripts.fastapp.database.conn import db # from Scripts.fastapp.database.schema import Users, ApiKeys, ApiWhiteLists from Scripts.fastapp.database.schema import Train, Files from Scripts.fastapp import models as m from Scripts.fastapp.errors import exceptions as ex import string import secrets from inspect import currentframe as frame from Scripts.fastapp.models import MessageOk, Test, Label from Scripts.fastapp.utils.file_module.test import t_load_data from Scripts.fastapp.utils.file_module.load_file_manager import loadFileManager from Scripts.fastapp.utils.preprocess_reg import preprocess_reg import os router = APIRouter(prefix='/pid') @router.get('/getIsPID', response_model=List[m.GetIsPID]) # @router.get('') async def show_data(request: Request, ispid): """ no params\n :return\n [\n {\n id: int = None\n name: str = None\n ext: str = None\n is_pid: bool = False\n },{\n ...\n }\n ]\n """ request.state.inspect = frame() print("### state.user : ", request.state.user) print("### state.inspect : ", request.state.inspect) print("###", request.url.hostname + request.url.path ) print("###", request.state.ip) result = Files.filter(is_pid=ispid).all() print("##RESULT##", result) # return dict(id=result[0].id, reg_count=result[0].reg_count) return result @router.get('/getTrain') async def get_train_data(request: Request, id: int): """ no params\n :return\n Train Model """ request.state.inspect = frame() result = Train.filter(file_id=id).order_by("id").all() print("##RESULT##", result) # return dict(id=result[0].id, reg_count=result[0].reg_count) return result # @router.post("/register", status_code=201, response_model=Label) @router.post("/register/{file_path}", status_code=201) async def input_data(file_path ,request: Request, 
session: Session = Depends(db.session)): """ file path를 입력해서 해당 파일을 DB에 등록하는 함수 지금은 사용 안함 """ print("start#########################################") request.state.inspect = frame() print(file_path) df = t_load_data(file_path) for row in df.itertuples(): print(row) print(row.page) Train.create(session, auto_commit=True,page=row.page ,reg_count=row.reg_count, column1=row.col1, column2=row.col2,column3=row.col3,column4=row.col4,column5=row.col5,column6=row.col6,column7=row.col7,column8=row.col8,column9=row.col9,column10=row.col10, y=-1) # d = Train.create(session, auto_commit=True, reg_count=3, column3=1, column7=1, y=1) # print(d.reg_count, d.id) print("#########################################") return MessageOk() @router.put('/update_y') async def update_label(request: Request, label_info: m.AddLabel): """ File Label Update\n :param request: :param y: :param label: :return: """ # user = request.state.user n_data = Train.filter(y= -1) request.state.inspect = frame() reet = n_data.update(auto_commit=True, **label_info.dict()) print("2##########################################") return reet @router.post('/show_file') async def show_file_data(request:Request, file_path): """ Ex_> D:/Project/pid/Scripts/fastapp/data/samples/pdf_sample2.pdf """ request.state.inspect = frame() # file type: Dictionary file = loadFileManager(file_path) return file.data @router.post("/files/") async def create_files(files: List[bytes] = File(...)): return {"file_sizes": [len(file) for file in files]} @router.post("/uploadfiles") async def create_upload_files(request: Request, files: List[UploadFile] = File(...), session: Session = Depends(db.session)): """ params: Files \n return: Last File's \n [ { page:1 td: dfsdf },{ page:2 td: asdasdasda } ] """ for file in files: contents = await file.read() print(os.path.join('./', file.filename)) # with open(os.path.join('./', file.filename), "wb") as fp: with open(UPLOAD_DIRECTORY + file.filename, "wb") as fp: fp.write(contents) f = 
loadFileManager(UPLOAD_DIRECTORY + file.filename) try: obj = Files.create(session, auto_commit=False, name=f.name, ext=f.ext, ip_add= request.state.ip ) # print(obj.id, f.name, f.ext, f.data) for p in f.data: df = preprocess_reg(p["td"]) Train.create(session, auto_commit=True, file_id=obj.id ,y=-1, page=p["page"]+1, text_data=p["td"], reg_count=int(df["reg_count"][0]), column1=int(df["col1"][0]), column2=int(df["col2"][0]), column3=int(df["col3"][0]),column4=int(df["col4"][0]),column5=int(df["col5"][0]),column6=int(df["col6"][0]), column7=int(df["col7"][0]),column8=int(df["col8"][0]),column9=int(df["col9"][0]),column10=int(df["col10"][0]) ) except Exception as e: raise ex.FileExtEx(file.filename) # 마지막 파일 f.data return f.data
normal
{ "blob_id": "349581774cded59ece6a5e8178d116c166a4a6b3", "index": 6841, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\[email protected]('/getIsPID', response_model=List[m.GetIsPID])\nasync def show_data(request: Request, ispid):\n \"\"\"\n no params\n\n :return\n\n [\n\n {\n\n id: int = None\n\n name: str = None\n\n ext: str = None\n\n is_pid: bool = False\n\n },{\n\n ...\n\n }\n\n ]\n\n \"\"\"\n request.state.inspect = frame()\n print('### state.user : ', request.state.user)\n print('### state.inspect : ', request.state.inspect)\n print('###', request.url.hostname + request.url.path)\n print('###', request.state.ip)\n result = Files.filter(is_pid=ispid).all()\n print('##RESULT##', result)\n return result\n\n\[email protected]('/getTrain')\nasync def get_train_data(request: Request, id: int):\n \"\"\"\n no params\n\n :return\n\n Train Model\n \"\"\"\n request.state.inspect = frame()\n result = Train.filter(file_id=id).order_by('id').all()\n print('##RESULT##', result)\n return result\n\n\[email protected]('/register/{file_path}', status_code=201)\nasync def input_data(file_path, request: Request, session: Session=Depends(\n db.session)):\n \"\"\"\n file path를 입력해서 해당 파일을 DB에 등록하는 함수\n 지금은 사용 안함\n \"\"\"\n print('start#########################################')\n request.state.inspect = frame()\n print(file_path)\n df = t_load_data(file_path)\n for row in df.itertuples():\n print(row)\n print(row.page)\n Train.create(session, auto_commit=True, page=row.page, reg_count=\n row.reg_count, column1=row.col1, column2=row.col2, column3=row.\n col3, column4=row.col4, column5=row.col5, column6=row.col6,\n column7=row.col7, column8=row.col8, column9=row.col9, column10=\n row.col10, y=-1)\n print('#########################################')\n return MessageOk()\n\n\[email protected]('/update_y')\nasync def update_label(request: Request, label_info: m.AddLabel):\n \"\"\"\n File Label Update\n\n :param request:\n :param y:\n :param label:\n :return:\n \"\"\"\n n_data = 
Train.filter(y=-1)\n request.state.inspect = frame()\n reet = n_data.update(auto_commit=True, **label_info.dict())\n print('2##########################################')\n return reet\n\n\[email protected]('/show_file')\nasync def show_file_data(request: Request, file_path):\n \"\"\"\n Ex_> D:/Project/pid/Scripts/fastapp/data/samples/pdf_sample2.pdf\n \"\"\"\n request.state.inspect = frame()\n file = loadFileManager(file_path)\n return file.data\n\n\[email protected]('/files/')\nasync def create_files(files: List[bytes]=File(...)):\n return {'file_sizes': [len(file) for file in files]}\n\n\[email protected]('/uploadfiles')\nasync def create_upload_files(request: Request, files: List[UploadFile]=\n File(...), session: Session=Depends(db.session)):\n \"\"\"\n params: Files \n\n return: Last File's \n\n [\n {\n page:1\n td: dfsdf\n },{\n page:2\n td: asdasdasda\n }\n ]\n \"\"\"\n for file in files:\n contents = await file.read()\n print(os.path.join('./', file.filename))\n with open(UPLOAD_DIRECTORY + file.filename, 'wb') as fp:\n fp.write(contents)\n f = loadFileManager(UPLOAD_DIRECTORY + file.filename)\n try:\n obj = Files.create(session, auto_commit=False, name=f.name, ext\n =f.ext, ip_add=request.state.ip)\n for p in f.data:\n df = preprocess_reg(p['td'])\n Train.create(session, auto_commit=True, file_id=obj.id, y=-\n 1, page=p['page'] + 1, text_data=p['td'], reg_count=int\n (df['reg_count'][0]), column1=int(df['col1'][0]),\n column2=int(df['col2'][0]), column3=int(df['col3'][0]),\n column4=int(df['col4'][0]), column5=int(df['col5'][0]),\n column6=int(df['col6'][0]), column7=int(df['col7'][0]),\n column8=int(df['col8'][0]), column9=int(df['col9'][0]),\n column10=int(df['col10'][0]))\n except Exception as e:\n raise ex.FileExtEx(file.filename)\n return f.data\n", "step-3": "<mask token>\nrouter = APIRouter(prefix='/pid')\n\n\[email protected]('/getIsPID', response_model=List[m.GetIsPID])\nasync def show_data(request: Request, ispid):\n \"\"\"\n no params\n\n 
:return\n\n [\n\n {\n\n id: int = None\n\n name: str = None\n\n ext: str = None\n\n is_pid: bool = False\n\n },{\n\n ...\n\n }\n\n ]\n\n \"\"\"\n request.state.inspect = frame()\n print('### state.user : ', request.state.user)\n print('### state.inspect : ', request.state.inspect)\n print('###', request.url.hostname + request.url.path)\n print('###', request.state.ip)\n result = Files.filter(is_pid=ispid).all()\n print('##RESULT##', result)\n return result\n\n\[email protected]('/getTrain')\nasync def get_train_data(request: Request, id: int):\n \"\"\"\n no params\n\n :return\n\n Train Model\n \"\"\"\n request.state.inspect = frame()\n result = Train.filter(file_id=id).order_by('id').all()\n print('##RESULT##', result)\n return result\n\n\[email protected]('/register/{file_path}', status_code=201)\nasync def input_data(file_path, request: Request, session: Session=Depends(\n db.session)):\n \"\"\"\n file path를 입력해서 해당 파일을 DB에 등록하는 함수\n 지금은 사용 안함\n \"\"\"\n print('start#########################################')\n request.state.inspect = frame()\n print(file_path)\n df = t_load_data(file_path)\n for row in df.itertuples():\n print(row)\n print(row.page)\n Train.create(session, auto_commit=True, page=row.page, reg_count=\n row.reg_count, column1=row.col1, column2=row.col2, column3=row.\n col3, column4=row.col4, column5=row.col5, column6=row.col6,\n column7=row.col7, column8=row.col8, column9=row.col9, column10=\n row.col10, y=-1)\n print('#########################################')\n return MessageOk()\n\n\[email protected]('/update_y')\nasync def update_label(request: Request, label_info: m.AddLabel):\n \"\"\"\n File Label Update\n\n :param request:\n :param y:\n :param label:\n :return:\n \"\"\"\n n_data = Train.filter(y=-1)\n request.state.inspect = frame()\n reet = n_data.update(auto_commit=True, **label_info.dict())\n print('2##########################################')\n return reet\n\n\[email protected]('/show_file')\nasync def show_file_data(request: Request, 
file_path):\n \"\"\"\n Ex_> D:/Project/pid/Scripts/fastapp/data/samples/pdf_sample2.pdf\n \"\"\"\n request.state.inspect = frame()\n file = loadFileManager(file_path)\n return file.data\n\n\[email protected]('/files/')\nasync def create_files(files: List[bytes]=File(...)):\n return {'file_sizes': [len(file) for file in files]}\n\n\[email protected]('/uploadfiles')\nasync def create_upload_files(request: Request, files: List[UploadFile]=\n File(...), session: Session=Depends(db.session)):\n \"\"\"\n params: Files \n\n return: Last File's \n\n [\n {\n page:1\n td: dfsdf\n },{\n page:2\n td: asdasdasda\n }\n ]\n \"\"\"\n for file in files:\n contents = await file.read()\n print(os.path.join('./', file.filename))\n with open(UPLOAD_DIRECTORY + file.filename, 'wb') as fp:\n fp.write(contents)\n f = loadFileManager(UPLOAD_DIRECTORY + file.filename)\n try:\n obj = Files.create(session, auto_commit=False, name=f.name, ext\n =f.ext, ip_add=request.state.ip)\n for p in f.data:\n df = preprocess_reg(p['td'])\n Train.create(session, auto_commit=True, file_id=obj.id, y=-\n 1, page=p['page'] + 1, text_data=p['td'], reg_count=int\n (df['reg_count'][0]), column1=int(df['col1'][0]),\n column2=int(df['col2'][0]), column3=int(df['col3'][0]),\n column4=int(df['col4'][0]), column5=int(df['col5'][0]),\n column6=int(df['col6'][0]), column7=int(df['col7'][0]),\n column8=int(df['col8'][0]), column9=int(df['col9'][0]),\n column10=int(df['col10'][0]))\n except Exception as e:\n raise ex.FileExtEx(file.filename)\n return f.data\n", "step-4": "from typing import List\nfrom uuid import uuid4\nfrom fastapi import APIRouter, Depends, FastAPI, File, UploadFile\nfrom sqlalchemy.orm import Session\nfrom starlette.requests import Request\nfrom Scripts.fastapp.common.consts import UPLOAD_DIRECTORY\nfrom Scripts.fastapp.database.conn import db\nfrom Scripts.fastapp.database.schema import Train, Files\nfrom Scripts.fastapp import models as m\nfrom Scripts.fastapp.errors import exceptions as ex\nimport 
string\nimport secrets\nfrom inspect import currentframe as frame\nfrom Scripts.fastapp.models import MessageOk, Test, Label\nfrom Scripts.fastapp.utils.file_module.test import t_load_data\nfrom Scripts.fastapp.utils.file_module.load_file_manager import loadFileManager\nfrom Scripts.fastapp.utils.preprocess_reg import preprocess_reg\nimport os\nrouter = APIRouter(prefix='/pid')\n\n\[email protected]('/getIsPID', response_model=List[m.GetIsPID])\nasync def show_data(request: Request, ispid):\n \"\"\"\n no params\n\n :return\n\n [\n\n {\n\n id: int = None\n\n name: str = None\n\n ext: str = None\n\n is_pid: bool = False\n\n },{\n\n ...\n\n }\n\n ]\n\n \"\"\"\n request.state.inspect = frame()\n print('### state.user : ', request.state.user)\n print('### state.inspect : ', request.state.inspect)\n print('###', request.url.hostname + request.url.path)\n print('###', request.state.ip)\n result = Files.filter(is_pid=ispid).all()\n print('##RESULT##', result)\n return result\n\n\[email protected]('/getTrain')\nasync def get_train_data(request: Request, id: int):\n \"\"\"\n no params\n\n :return\n\n Train Model\n \"\"\"\n request.state.inspect = frame()\n result = Train.filter(file_id=id).order_by('id').all()\n print('##RESULT##', result)\n return result\n\n\[email protected]('/register/{file_path}', status_code=201)\nasync def input_data(file_path, request: Request, session: Session=Depends(\n db.session)):\n \"\"\"\n file path를 입력해서 해당 파일을 DB에 등록하는 함수\n 지금은 사용 안함\n \"\"\"\n print('start#########################################')\n request.state.inspect = frame()\n print(file_path)\n df = t_load_data(file_path)\n for row in df.itertuples():\n print(row)\n print(row.page)\n Train.create(session, auto_commit=True, page=row.page, reg_count=\n row.reg_count, column1=row.col1, column2=row.col2, column3=row.\n col3, column4=row.col4, column5=row.col5, column6=row.col6,\n column7=row.col7, column8=row.col8, column9=row.col9, column10=\n row.col10, y=-1)\n 
print('#########################################')\n return MessageOk()\n\n\[email protected]('/update_y')\nasync def update_label(request: Request, label_info: m.AddLabel):\n \"\"\"\n File Label Update\n\n :param request:\n :param y:\n :param label:\n :return:\n \"\"\"\n n_data = Train.filter(y=-1)\n request.state.inspect = frame()\n reet = n_data.update(auto_commit=True, **label_info.dict())\n print('2##########################################')\n return reet\n\n\[email protected]('/show_file')\nasync def show_file_data(request: Request, file_path):\n \"\"\"\n Ex_> D:/Project/pid/Scripts/fastapp/data/samples/pdf_sample2.pdf\n \"\"\"\n request.state.inspect = frame()\n file = loadFileManager(file_path)\n return file.data\n\n\[email protected]('/files/')\nasync def create_files(files: List[bytes]=File(...)):\n return {'file_sizes': [len(file) for file in files]}\n\n\[email protected]('/uploadfiles')\nasync def create_upload_files(request: Request, files: List[UploadFile]=\n File(...), session: Session=Depends(db.session)):\n \"\"\"\n params: Files \n\n return: Last File's \n\n [\n {\n page:1\n td: dfsdf\n },{\n page:2\n td: asdasdasda\n }\n ]\n \"\"\"\n for file in files:\n contents = await file.read()\n print(os.path.join('./', file.filename))\n with open(UPLOAD_DIRECTORY + file.filename, 'wb') as fp:\n fp.write(contents)\n f = loadFileManager(UPLOAD_DIRECTORY + file.filename)\n try:\n obj = Files.create(session, auto_commit=False, name=f.name, ext\n =f.ext, ip_add=request.state.ip)\n for p in f.data:\n df = preprocess_reg(p['td'])\n Train.create(session, auto_commit=True, file_id=obj.id, y=-\n 1, page=p['page'] + 1, text_data=p['td'], reg_count=int\n (df['reg_count'][0]), column1=int(df['col1'][0]),\n column2=int(df['col2'][0]), column3=int(df['col3'][0]),\n column4=int(df['col4'][0]), column5=int(df['col5'][0]),\n column6=int(df['col6'][0]), column7=int(df['col7'][0]),\n column8=int(df['col8'][0]), column9=int(df['col9'][0]),\n column10=int(df['col10'][0]))\n 
except Exception as e:\n raise ex.FileExtEx(file.filename)\n return f.data\n", "step-5": "from typing import List\nfrom uuid import uuid4\n\nfrom fastapi import APIRouter, Depends, FastAPI, File, UploadFile\nfrom sqlalchemy.orm import Session\nfrom starlette.requests import Request\n\nfrom Scripts.fastapp.common.consts import UPLOAD_DIRECTORY\nfrom Scripts.fastapp.database.conn import db\n# from Scripts.fastapp.database.schema import Users, ApiKeys, ApiWhiteLists\nfrom Scripts.fastapp.database.schema import Train, Files\nfrom Scripts.fastapp import models as m\nfrom Scripts.fastapp.errors import exceptions as ex\nimport string\nimport secrets\nfrom inspect import currentframe as frame\n\nfrom Scripts.fastapp.models import MessageOk, Test, Label\nfrom Scripts.fastapp.utils.file_module.test import t_load_data\n\nfrom Scripts.fastapp.utils.file_module.load_file_manager import loadFileManager\nfrom Scripts.fastapp.utils.preprocess_reg import preprocess_reg\n\nimport os\n\n\nrouter = APIRouter(prefix='/pid')\n\n\[email protected]('/getIsPID', response_model=List[m.GetIsPID])\n# @router.get('')\nasync def show_data(request: Request, ispid):\n \"\"\"\n no params\\n\n :return\\n\n [\\n\n {\\n\n id: int = None\\n\n name: str = None\\n\n ext: str = None\\n\n is_pid: bool = False\\n\n },{\\n\n ...\\n\n }\\n\n ]\\n\n \"\"\"\n request.state.inspect = frame()\n print(\"### state.user : \", request.state.user)\n print(\"### state.inspect : \", request.state.inspect)\n print(\"###\", request.url.hostname + request.url.path )\n print(\"###\", request.state.ip)\n result = Files.filter(is_pid=ispid).all()\n \n print(\"##RESULT##\", result)\n # return dict(id=result[0].id, reg_count=result[0].reg_count)\n return result\n\[email protected]('/getTrain')\nasync def get_train_data(request: Request, id: int):\n \"\"\"\n no params\\n\n :return\\n\n Train Model\n \"\"\"\n request.state.inspect = frame()\n result = Train.filter(file_id=id).order_by(\"id\").all()\n \n print(\"##RESULT##\", 
result)\n # return dict(id=result[0].id, reg_count=result[0].reg_count)\n return result\n\n\n# @router.post(\"/register\", status_code=201, response_model=Label)\[email protected](\"/register/{file_path}\", status_code=201)\nasync def input_data(file_path ,request: Request, session: Session = Depends(db.session)):\n \"\"\"\n file path를 입력해서 해당 파일을 DB에 등록하는 함수\n 지금은 사용 안함\n \"\"\"\n print(\"start#########################################\")\n request.state.inspect = frame()\n print(file_path)\n df = t_load_data(file_path)\n for row in df.itertuples():\n print(row)\n print(row.page)\n Train.create(session, auto_commit=True,page=row.page ,reg_count=row.reg_count, column1=row.col1, column2=row.col2,column3=row.col3,column4=row.col4,column5=row.col5,column6=row.col6,column7=row.col7,column8=row.col8,column9=row.col9,column10=row.col10, y=-1)\n\n # d = Train.create(session, auto_commit=True, reg_count=3, column3=1, column7=1, y=1)\n # print(d.reg_count, d.id)\n print(\"#########################################\")\n return MessageOk()\n\n\[email protected]('/update_y')\nasync def update_label(request: Request, label_info: m.AddLabel):\n \"\"\"\n File Label Update\\n\n :param request:\n :param y:\n :param label:\n :return:\n \"\"\"\n # user = request.state.user\n n_data = Train.filter(y= -1)\n request.state.inspect = frame()\n\n reet = n_data.update(auto_commit=True, **label_info.dict())\n print(\"2##########################################\")\n return reet\n \[email protected]('/show_file')\nasync def show_file_data(request:Request, file_path):\n \"\"\"\n Ex_> D:/Project/pid/Scripts/fastapp/data/samples/pdf_sample2.pdf\n \"\"\"\n request.state.inspect = frame()\n\n # file type: Dictionary\n file = loadFileManager(file_path)\n return file.data\n\[email protected](\"/files/\")\nasync def create_files(files: List[bytes] = File(...)):\n return {\"file_sizes\": [len(file) for file in files]}\n \n\[email protected](\"/uploadfiles\")\nasync def create_upload_files(request: 
Request, files: List[UploadFile] = File(...), session: Session = Depends(db.session)):\n \"\"\"\n params: Files \\n\n return: Last File's \\n\n [\n {\n page:1\n td: dfsdf\n },{\n page:2\n td: asdasdasda\n }\n ]\n \"\"\"\n for file in files:\n contents = await file.read()\n print(os.path.join('./', file.filename))\n # with open(os.path.join('./', file.filename), \"wb\") as fp:\n with open(UPLOAD_DIRECTORY + file.filename, \"wb\") as fp:\n fp.write(contents)\n f = loadFileManager(UPLOAD_DIRECTORY + file.filename)\n try:\n obj = Files.create(session, auto_commit=False, name=f.name, ext=f.ext, ip_add= request.state.ip )\n # print(obj.id, f.name, f.ext, f.data)\n\n for p in f.data:\n df = preprocess_reg(p[\"td\"])\n Train.create(session, auto_commit=True, file_id=obj.id ,y=-1, page=p[\"page\"]+1, text_data=p[\"td\"],\n reg_count=int(df[\"reg_count\"][0]), column1=int(df[\"col1\"][0]), column2=int(df[\"col2\"][0]),\n column3=int(df[\"col3\"][0]),column4=int(df[\"col4\"][0]),column5=int(df[\"col5\"][0]),column6=int(df[\"col6\"][0]),\n column7=int(df[\"col7\"][0]),column8=int(df[\"col8\"][0]),column9=int(df[\"col9\"][0]),column10=int(df[\"col10\"][0])\n )\n\n except Exception as e:\n raise ex.FileExtEx(file.filename)\n\n # 마지막 파일 f.data\n return f.data\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from django import urls from django.urls import path from genius.views import (home, Class_create, Class_Update, Class_Delete, Class_Detail, Classes, Add_name, Student_Main, Student_Create, Student_Update, Student_Delete, Student_Detail, Search) app_name = 'genius' urlpatterns = [ path('', home, name='home'), path('class/', Classes, name='class'), path('class/add-name', Add_name, name='add-name'), path('class/create', Class_create, name='create-class'), path('class/<int:id>', Class_Detail, name='detail'), path('class/<int:id>/edit/', Class_Update, name='update'), path('class/<int:id>/delete/', Class_Delete, name='delete'), path('stds/', Student_Main, name='stds'), path('stds/create', Student_Create, name='stds-new'), path('stds/<int:id>',Student_Detail , name='std-detail'), path('stds/search/',Search , name='std-search'), path('stds/<int:id>/edit/', Student_Update, name='std-update'), path('stds/<int:id>/delete/', Student_Delete, name='std-delete'), ]
normal
{ "blob_id": "fd6a32652b845b2a6d6d8934c0dde91afdddd9f3", "index": 9046, "step-1": "<mask token>\n", "step-2": "<mask token>\napp_name = 'genius'\nurlpatterns = [path('', home, name='home'), path('class/', Classes, name=\n 'class'), path('class/add-name', Add_name, name='add-name'), path(\n 'class/create', Class_create, name='create-class'), path(\n 'class/<int:id>', Class_Detail, name='detail'), path(\n 'class/<int:id>/edit/', Class_Update, name='update'), path(\n 'class/<int:id>/delete/', Class_Delete, name='delete'), path('stds/',\n Student_Main, name='stds'), path('stds/create', Student_Create, name=\n 'stds-new'), path('stds/<int:id>', Student_Detail, name='std-detail'),\n path('stds/search/', Search, name='std-search'), path(\n 'stds/<int:id>/edit/', Student_Update, name='std-update'), path(\n 'stds/<int:id>/delete/', Student_Delete, name='std-delete')]\n", "step-3": "from django import urls\nfrom django.urls import path\nfrom genius.views import home, Class_create, Class_Update, Class_Delete, Class_Detail, Classes, Add_name, Student_Main, Student_Create, Student_Update, Student_Delete, Student_Detail, Search\napp_name = 'genius'\nurlpatterns = [path('', home, name='home'), path('class/', Classes, name=\n 'class'), path('class/add-name', Add_name, name='add-name'), path(\n 'class/create', Class_create, name='create-class'), path(\n 'class/<int:id>', Class_Detail, name='detail'), path(\n 'class/<int:id>/edit/', Class_Update, name='update'), path(\n 'class/<int:id>/delete/', Class_Delete, name='delete'), path('stds/',\n Student_Main, name='stds'), path('stds/create', Student_Create, name=\n 'stds-new'), path('stds/<int:id>', Student_Detail, name='std-detail'),\n path('stds/search/', Search, name='std-search'), path(\n 'stds/<int:id>/edit/', Student_Update, name='std-update'), path(\n 'stds/<int:id>/delete/', Student_Delete, name='std-delete')]\n", "step-4": "from django import urls\nfrom django.urls import path\nfrom genius.views import (home, Class_create, 
Class_Update, Class_Delete, Class_Detail, Classes, Add_name,\n Student_Main, Student_Create, Student_Update, Student_Delete, Student_Detail, Search)\n\napp_name = 'genius'\n\nurlpatterns = [\n path('', home, name='home'),\n path('class/', Classes, name='class'),\n path('class/add-name', Add_name, name='add-name'),\n path('class/create', Class_create, name='create-class'),\n path('class/<int:id>', Class_Detail, name='detail'),\n path('class/<int:id>/edit/', Class_Update, name='update'),\n path('class/<int:id>/delete/', Class_Delete, name='delete'),\n path('stds/', Student_Main, name='stds'),\n path('stds/create', Student_Create, name='stds-new'),\n path('stds/<int:id>',Student_Detail , name='std-detail'),\n path('stds/search/',Search , name='std-search'),\n path('stds/<int:id>/edit/', Student_Update, name='std-update'),\n path('stds/<int:id>/delete/', Student_Delete, name='std-delete'),\n]\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python import mcvine.cli from numpy import array from mcvine_workflow.singlextal.resolution import use_res_comps as urc beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons' instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter') samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml' psi = -0.005846744654920276 hkl2Q = array([[-0.65520642, 0.93819023, 0. ], [ 0.66340068, 0.4633009 , -0.80916512], [-0.66340068, -0.4633009 , -0.80916512]]) pp = array([-0.88585691, 2.86622706, -0.61241657]) pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0])) t_m2p = 0.0071883434093180376 Q = array([ 4.75696626, -3.03446862, 0.64836415]) E = 8.4494171829103024 hkl_projection = array([ 0.70608101, 0.61545409, 0.14251389]) urc.run( beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p, Q, E, hkl_projection, Nbuffer=100000)
normal
{ "blob_id": "de286b94e09db477e3d920a9eff1a299474baf20", "index": 2614, "step-1": "<mask token>\n", "step-2": "<mask token>\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-3": "<mask token>\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\n )\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -\n 0.80916512], [-0.66340068, -0.4633009, -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.007188343409318038\nQ = array([4.75696626, -3.03446862, 0.64836415])\nE = 8.449417182910302\nhkl_projection = array([0.70608101, 0.61545409, 0.14251389])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-4": "import mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = (\n '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\n )\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = (\n '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\n )\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0.0], [0.66340068, 0.4633009, -\n 0.80916512], [-0.66340068, -0.4633009, -0.80916512]])\npp = array([-0.88585691, 
2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2],\n pp[0]))\nt_m2p = 0.007188343409318038\nQ = array([4.75696626, -3.03446862, 0.64836415])\nE = 8.449417182910302\nhkl_projection = array([0.70608101, 0.61545409, 0.14251389])\nurc.run(beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel,\n t_m2p, Q, E, hkl_projection, Nbuffer=100000)\n", "step-5": "#!/usr/bin/env python\nimport mcvine.cli\nfrom numpy import array\nfrom mcvine_workflow.singlextal.resolution import use_res_comps as urc\nbeam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_30_1e9/out/neutrons'\ninstrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')\nsamplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_30/E8.44941718291_hkl-4.55419640541,0.935679515453,-1.73695496948/sample/sampleassembly.xml'\npsi = -0.005846744654920276\nhkl2Q = array([[-0.65520642, 0.93819023, 0. ],\n [ 0.66340068, 0.4633009 , -0.80916512],\n [-0.66340068, -0.4633009 , -0.80916512]])\npp = array([-0.88585691, 2.86622706, -0.61241657])\npixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))\nt_m2p = 0.0071883434093180376\nQ = array([ 4.75696626, -3.03446862, 0.64836415])\nE = 8.4494171829103024\nhkl_projection = array([ 0.70608101, 0.61545409, 0.14251389])\nurc.run(\n beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,\n Q, E, hkl_projection, Nbuffer=100000)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*-" """ getMetaStream action for graphingwiki - alternative meta retrieval action that uses abuse-sa query language for filtering metas and returns Line Delimeted JSON or event-stream @copyright: 2015 Lauri Pokka <[email protected]> @license: MIT <http://www.opensource.org/licenses/mit-license.php> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" from graphingwiki import values_to_form from graphingwiki.editing import iter_metas try: import simplejson as json except ImportError: import json def metas_to_json(req, q): def flatten(arr): if len(arr) == 1: return arr[0] else: return arr for page, metas in iter_metas(req, q): flattened = [(key, flatten(val)) for key, val in metas.items()] yield json.dumps(dict(flattened + [('gwikipagename', page)])) class MetaStreamer(object): def __init__(self, iterator): self.iterator = iterator self.done = False def read(self, *args): if not self.done: try: row = self.iterator.next() return "data: " + row + "\n\n" except StopIteration: self.done = True return "event: done\ndata: \n\n" else: return None def close(self): self.done = True def execute(pagename, request): form = values_to_form(request.values) query = form.get('q', [None])[0] output_format = form.get('type', [""])[0] try: json_rows = metas_to_json(request, query) accepts = unicode(request.request.accept_mimetypes) if output_format == "stream" or "text/event-stream" in accepts: request.content_type = "text/event-stream" ## send_file seems to be the least hacky way ## for sending streamed content in MoinMoin request.send_file(MetaStreamer(json_rows)) else: request.content_type = "application/json;boundary=NL" for row in json_rows: request.write(row + "\n") except ImportError: request.status_code = 501 request.write(u"abusehelper package not available") except ValueError: request.status_code = 400 request.write(u"invalid query '" + query + u"'")
normal
{ "blob_id": "c67cd3c16c15d6aab02a07736c83bbdd5bd98514", "index": 1839, "step-1": "<mask token>\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [''])[0]\n try:\n json_rows = metas_to_json(request, query)\n accepts = unicode(request.request.accept_mimetypes)\n if output_format == 'stream' or 'text/event-stream' in accepts:\n request.content_type = 'text/event-stream'\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = 'application/json;boundary=NL'\n for row in json_rows:\n request.write(row + '\\n')\n except 
ImportError:\n request.status_code = 501\n request.write(u'abusehelper package not available')\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")\n", "step-3": "<mask token>\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [''])[0]\n try:\n json_rows = metas_to_json(request, query)\n accepts = unicode(request.request.accept_mimetypes)\n if output_format == 'stream' or 'text/event-stream' in accepts:\n request.content_type = 'text/event-stream'\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = 'application/json;boundary=NL'\n for row in json_rows:\n request.write(row + '\\n')\n except ImportError:\n request.status_code = 501\n request.write(u'abusehelper package not available')\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")\n", "step-4": "<mask token>\nfrom graphingwiki import values_to_form\nfrom graphingwiki.editing import iter_metas\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\ndef metas_to_json(req, q):\n\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n for 
page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return 'data: ' + row + '\\n\\n'\n except StopIteration:\n self.done = True\n return 'event: done\\ndata: \\n\\n'\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [''])[0]\n try:\n json_rows = metas_to_json(request, query)\n accepts = unicode(request.request.accept_mimetypes)\n if output_format == 'stream' or 'text/event-stream' in accepts:\n request.content_type = 'text/event-stream'\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = 'application/json;boundary=NL'\n for row in json_rows:\n request.write(row + '\\n')\n except ImportError:\n request.status_code = 501\n request.write(u'abusehelper package not available')\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")\n", "step-5": "# -*- coding: utf-8 -*-\"\n\"\"\"\n getMetaStream action for graphingwiki\n - alternative meta retrieval action that uses\n abuse-sa query language for filtering metas\n and returns Line Delimeted JSON or event-stream\n\n @copyright: 2015 Lauri Pokka <[email protected]>\n @license: MIT <http://www.opensource.org/licenses/mit-license.php>\n\n Permission is hereby granted, free of charge, to any person\n obtaining a copy of this software and associated documentation\n files (the \"Software\"), to deal in the Software without\n restriction, including without limitation the rights to use, copy,\n modify, merge, publish, distribute, sublicense, and/or sell copies\n of the Software, and to permit 
persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n DEALINGS IN THE SOFTWARE.\n\n\"\"\"\n\nfrom graphingwiki import values_to_form\nfrom graphingwiki.editing import iter_metas\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\n\ndef metas_to_json(req, q):\n def flatten(arr):\n if len(arr) == 1:\n return arr[0]\n else:\n return arr\n\n for page, metas in iter_metas(req, q):\n flattened = [(key, flatten(val)) for key, val in metas.items()]\n yield json.dumps(dict(flattened + [('gwikipagename', page)]))\n\n\nclass MetaStreamer(object):\n def __init__(self, iterator):\n self.iterator = iterator\n self.done = False\n\n def read(self, *args):\n if not self.done:\n try:\n row = self.iterator.next()\n return \"data: \" + row + \"\\n\\n\"\n except StopIteration:\n self.done = True\n return \"event: done\\ndata: \\n\\n\"\n else:\n return None\n\n def close(self):\n self.done = True\n\n\ndef execute(pagename, request):\n form = values_to_form(request.values)\n query = form.get('q', [None])[0]\n output_format = form.get('type', [\"\"])[0]\n try:\n json_rows = metas_to_json(request, query)\n\n accepts = unicode(request.request.accept_mimetypes)\n\n if output_format == \"stream\" or \"text/event-stream\" in accepts:\n request.content_type = \"text/event-stream\"\n\n ## send_file seems to be the least hacky way\n ## for sending streamed 
content in MoinMoin\n request.send_file(MetaStreamer(json_rows))\n else:\n request.content_type = \"application/json;boundary=NL\"\n\n for row in json_rows:\n request.write(row + \"\\n\")\n\n except ImportError:\n request.status_code = 501\n request.write(u\"abusehelper package not available\")\n except ValueError:\n request.status_code = 400\n request.write(u\"invalid query '\" + query + u\"'\")", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
from pptx import Presentation import csv prs = Presentation() slide_layout = prs.slide_layouts[1] slide = prs.slides.add_slide(slide_layout) shapes = slide.shapes title_shape = shapes.title body_shape = shapes.placeholders[1] title_shape.text = "Tekst" tf = body_shape.text_frame tf.text = "Zawartość tekst frame" with open("report.csv") as csvfile: data = csv.reader(csvfile, delimiter=',') for row in data: p = tf.add_paragraph() p.text = row[0] p.level = 1 p = tf.add_paragraph() p.text = row[1] p.level = 2 prs.save("raport.pptx")
normal
{ "blob_id": "e1f003b6a687e5654a1ee6c595e789ced02cd6c3", "index": 7086, "step-1": "<mask token>\n", "step-2": "<mask token>\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n", "step-3": "<mask token>\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\ntitle_shape = shapes.title\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = 'Tekst'\ntf = body_shape.text_frame\ntf.text = 'Zawartość tekst frame'\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n", "step-4": "from pptx import Presentation\nimport csv\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\ntitle_shape = shapes.title\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = 'Tekst'\ntf = body_shape.text_frame\ntf.text = 'Zawartość tekst frame'\nwith open('report.csv') as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 1\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\nprs.save('raport.pptx')\n", "step-5": "from pptx import Presentation\nimport csv\n\nprs = Presentation()\nslide_layout = prs.slide_layouts[1]\nslide = prs.slides.add_slide(slide_layout)\nshapes = slide.shapes\n\ntitle_shape = shapes.title\n\nbody_shape = shapes.placeholders[1]\ntitle_shape.text = \"Tekst\"\n\ntf = body_shape.text_frame\ntf.text = \"Zawartość tekst frame\"\nwith open(\"report.csv\") as csvfile:\n data = csv.reader(csvfile, delimiter=',')\n for row in data:\n p = tf.add_paragraph()\n p.text = row[0]\n p.level = 
1\n\n p = tf.add_paragraph()\n p.text = row[1]\n p.level = 2\n\nprs.save(\"raport.pptx\")\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from config import Config import numpy as np from itertools import product from sklearn.utils import shuffle from sklearn.metrics import precision_recall_fscore_support from keras import callbacks, regularizers from keras.models import Sequential from keras.layers import Dense, InputLayer from keras import backend as K from keras.wrappers.scikit_learn import KerasClassifier from sklearn.model_selection import StratifiedKFold, cross_val_score from src.classification_data_tools import limit_negative_samples import pickle from tensorflow import set_random_seed import tensorflow as tf cfg = Config() def FetchData(cfg): with open(cfg.FILE, 'rb') as f: data = pickle.load(f) if cfg.SHUFFLE: features, targets = shuffle(data[0], data[1]) else: features = data[0] targets = data[1] training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1] training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1] test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):] test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):] if cfg.NEGATIVE_SAMPLES_RATIO != 0: training_features, training_targets = limit_negative_samples(training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO) return training_features, training_targets, test_features, test_targets def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer, activation_function): if regularizer == 'l1': regularizer = regularizers.l1(0.05) elif regularizer == 'l2': regularizer = regularizers.l2(0.05) elif regularizer == 'none': regularizer = None model = Sequential() model.add(InputLayer(input_shape)) if iftest: for layer in hidden_layers: model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=regularizer, activation=activation_function)) else: for layer in cfg.HIDDEN_LAYERS: model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION)) model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid')) model.compile(loss=cfg.LOSS, 
optimizer=cfg.OPTIMIZER, metrics=['accuracy']) return model def TrainModel(cfg, model, training_features, training_targets, cw): if cfg.EARLY_STOPPING: es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min') model.fit(training_features, training_targets, epochs=cfg.EPOCHS, callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1, validation_split=1 - cfg.TRAINING_CUT) else: model.fit(training_features, training_targets, epochs=cfg.EPOCHS, class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1, validation_split=1 - cfg.TRAINING_CUT) return model def EvaluateModel(cfg, model, test_features, test_targets): predictions = model.predict(test_features) for prediction in predictions: if prediction[0] < 0.5: prediction[0] = 0 else: prediction[0] = 1 precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro') f1 = 2 * ((precision * recall) / (precision + recall)) print(str(precision) + ', ' + str(recall) + ', ' + str(f1)) def reset_keras(): sess = K.get_session() K.clear_session() sess.close() sess = K.get_session() np.random.seed(1) tf.set_random_seed(2) def EvaluateModelTest(cfg, model, test_features, test_targets): predictions = model.predict(test_features) for prediction in predictions: if prediction[0] < 0.5: prediction[0] = 0 else: prediction[0] = 1 precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro') f1 = 2 * ((precision * recall) / (precision + recall)) return precision, recall, f1 #estimator = KerasClassifier(build_fn=model, epochs=4, batch_size=32, verbose=1) #kfold = StratifiedKFold(n_splits=10, shuffle=True) #results = cross_val_score(estimator, test_features, test_targets, cv=kfold) #print("Results: %.2f%% (%.2f%%)" % (results.mean() * 100, results.std() * 100)) training_X, training_y, test_X, test_Y = FetchData(cfg) training_features = np.array(training_X) training_targets 
= np.array(training_y) test_features = np.array(test_X) test_targets = np.array(test_Y) input_shape = (len(training_features[0]),) if cfg.MULTIPLE_ARCHITECTURES: best_architecture = [] best_regularizer = '' best_activation_function = '' best_precision = 0 best_recall = 0 best_f1 = 0 count_max = 0 counter = 0 architecture_list = [] for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1): prod = list(product(cfg.TEST_NOTES, repeat = i)) architecture_list.extend(prod) count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS) with open('output/wrapper_test_mean.csv', 'a') as f: f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n') for architecture in architecture_list: for regularizer in cfg.TEST_REGULARIZERS: for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS: for class_weight in cfg.TEST_CLASS_WEIGHTS: reset_keras() print(str(counter) + '/' + str(count_max)) model = BuildModel(cfg, input_shape, True, list(architecture), regularizer, activation_function) model_trained = TrainModel(cfg, model, training_features, training_targets, {0: 1., 1: class_weight}) precision, recall, f1 = EvaluateModelTest(cfg, model_trained, test_features, test_targets) if recall > best_recall: best_precision = precision best_recall = recall best_f1 = f1 best_architecture = list(architecture) best_regularizer = regularizer best_activation_function = activation_function la1 = list(architecture)[0] la2 = 0 la3 = 0 la4 = 0 la5 = 0 if len(list(architecture)) >= 2: la2 = list(architecture)[1] if len(list(architecture)) >= 3: la3 = list(architecture)[2] if len(list(architecture)) >= 4: la4 = list(architecture)[3] if len(list(architecture)) >= 5: la5 = list(architecture)[4] f.write(str(la1) + ',' + str(la2) + ',' + str(la3) + ',' + str(la4) + ',' + str(la5) + ',' + str(class_weight) + ',' + regularizer + ',' + activation_function + ',' + str(precision) + ',' + str(recall) + ',' + str(f1) + '\n') counter 
+= 1 print('BEST ARCHITECTURE:') print(best_architecture) print(best_regularizer) print(best_activation_function) print('precision: ' + str(best_precision) + ', recall: ' + str(best_recall) + ', f1: ' + str(best_f1)) else: reset_keras() model = BuildModel(cfg, input_shape, False, 0, 0, 0) model = TrainModel(cfg, model, training_features, training_targets, cfg.CLASS_WEIGHT) EvaluateModel(cfg, model, test_features, test_targets)
normal
{ "blob_id": "957e18b2536cda69ba1db571d0308d5e392fe488", "index": 2166, "step-1": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, 
validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\n<mask token>\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if 
regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = 
model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if 
cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\n<mask token>\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n count_max = 0\n counter = 0\n architecture_list = []\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat=i))\n architecture_list.extend(prod)\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) 
* len(cfg\n .TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n with open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n print(str(counter) + '/' + str(count_max))\n model = BuildModel(cfg, input_shape, True, list(\n architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model,\n training_features, training_targets, {(0): 1.0,\n (1): class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg,\n model_trained, test_features, test_targets)\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +\n ',' + str(la4) + ',' + str(la5) + ',' + str(\n class_weight) + ',' + regularizer + ',' +\n activation_function + ',' + str(precision) +\n ',' + str(recall) + ',' + str(f1) + '\\n')\n counter += 1\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(\n best_recall) + ', f1: ' + str(best_f1))\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg\n .CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, 
test_targets)\n", "step-4": "<mask token>\ncfg = Config()\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(\n training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO\n )\n return training_features, training_targets, test_features, test_targets\n\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,\n activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n model = Sequential()\n model.add(InputLayer(input_shape))\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=\n cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n return model\n\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,\n patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,\n verbose=1, validation_split=1 - cfg.TRAINING_CUT)\n 
else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS,\n class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n return model\n\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n precision, recall, fscore, support = precision_recall_fscore_support(\n test_targets, predictions, average='macro')\n f1 = 2 * (precision * recall / (precision + recall))\n return precision, recall, f1\n\n\ntraining_X, training_y, test_X, test_Y = FetchData(cfg)\ntraining_features = np.array(training_X)\ntraining_targets = np.array(training_y)\ntest_features = np.array(test_X)\ntest_targets = np.array(test_Y)\ninput_shape = len(training_features[0]),\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n count_max = 0\n counter = 0\n architecture_list = []\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat=i))\n architecture_list.extend(prod)\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg\n .TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n with 
open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n print(str(counter) + '/' + str(count_max))\n model = BuildModel(cfg, input_shape, True, list(\n architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model,\n training_features, training_targets, {(0): 1.0,\n (1): class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg,\n model_trained, test_features, test_targets)\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +\n ',' + str(la4) + ',' + str(la5) + ',' + str(\n class_weight) + ',' + regularizer + ',' +\n activation_function + ',' + str(precision) +\n ',' + str(recall) + ',' + str(f1) + '\\n')\n counter += 1\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(\n best_recall) + ', f1: ' + str(best_f1))\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg\n .CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n", "step-5": "from config import Config\nimport numpy as np\nfrom 
itertools import product\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom keras import callbacks, regularizers\nfrom keras.models import Sequential\nfrom keras.layers import Dense, InputLayer\nfrom keras import backend as K\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom sklearn.model_selection import StratifiedKFold, cross_val_score\nfrom src.classification_data_tools import limit_negative_samples\nimport pickle\nfrom tensorflow import set_random_seed\nimport tensorflow as tf\n\ncfg = Config()\n\n\n\ndef FetchData(cfg):\n with open(cfg.FILE, 'rb') as f:\n data = pickle.load(f)\n\n if cfg.SHUFFLE:\n features, targets = shuffle(data[0], data[1])\n else:\n features = data[0]\n targets = data[1]\n\n training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]\n training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]\n test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]\n test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]\n\n if cfg.NEGATIVE_SAMPLES_RATIO != 0:\n training_features, training_targets = limit_negative_samples(training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO)\n\n return training_features, training_targets, test_features, test_targets\n\ndef BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer, activation_function):\n if regularizer == 'l1':\n regularizer = regularizers.l1(0.05)\n elif regularizer == 'l2':\n regularizer = regularizers.l2(0.05)\n elif regularizer == 'none':\n regularizer = None\n\n model = Sequential()\n\n model.add(InputLayer(input_shape))\n\n if iftest:\n for layer in hidden_layers:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=regularizer, activation=activation_function))\n else:\n for layer in cfg.HIDDEN_LAYERS:\n model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))\n\n model.add(Dense(1, use_bias=cfg.BIAS, 
activation='sigmoid'))\n\n model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])\n\n return model\n\ndef TrainModel(cfg, model, training_features, training_targets, cw):\n if cfg.EARLY_STOPPING:\n es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')\n\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS, callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n else:\n model.fit(training_features, training_targets, epochs=cfg.EPOCHS, class_weight=cw,\n batch_size=cfg.BATCH_SIZE, verbose=1,\n validation_split=1 - cfg.TRAINING_CUT)\n\n return model\n\ndef EvaluateModel(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n\n precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')\n f1 = 2 * ((precision * recall) / (precision + recall))\n print(str(precision) + ', ' + str(recall) + ', ' + str(f1))\n\n\ndef reset_keras():\n sess = K.get_session()\n K.clear_session()\n sess.close()\n sess = K.get_session()\n np.random.seed(1)\n tf.set_random_seed(2)\n\ndef EvaluateModelTest(cfg, model, test_features, test_targets):\n predictions = model.predict(test_features)\n\n for prediction in predictions:\n if prediction[0] < 0.5:\n prediction[0] = 0\n else:\n prediction[0] = 1\n\n precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')\n f1 = 2 * ((precision * recall) / (precision + recall))\n return precision, recall, f1\n\n #estimator = KerasClassifier(build_fn=model, epochs=4, batch_size=32, verbose=1)\n #kfold = StratifiedKFold(n_splits=10, shuffle=True)\n #results = cross_val_score(estimator, test_features, test_targets, cv=kfold)\n #print(\"Results: %.2f%% 
(%.2f%%)\" % (results.mean() * 100, results.std() * 100))\n\n\ntraining_X, training_y, test_X, test_Y = FetchData(cfg)\n\ntraining_features = np.array(training_X)\ntraining_targets = np.array(training_y)\ntest_features = np.array(test_X)\ntest_targets = np.array(test_Y)\n\ninput_shape = (len(training_features[0]),)\n\nif cfg.MULTIPLE_ARCHITECTURES:\n best_architecture = []\n best_regularizer = ''\n best_activation_function = ''\n best_precision = 0\n best_recall = 0\n best_f1 = 0\n\n count_max = 0\n counter = 0\n\n architecture_list = []\n\n for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):\n prod = list(product(cfg.TEST_NOTES, repeat = i))\n architecture_list.extend(prod)\n\n count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)\n\n with open('output/wrapper_test_mean.csv', 'a') as f:\n f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\\n')\n for architecture in architecture_list:\n for regularizer in cfg.TEST_REGULARIZERS:\n for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:\n for class_weight in cfg.TEST_CLASS_WEIGHTS:\n reset_keras()\n\n print(str(counter) + '/' + str(count_max))\n\n model = BuildModel(cfg, input_shape, True, list(architecture), regularizer, activation_function)\n model_trained = TrainModel(cfg, model, training_features, training_targets, {0: 1., 1: class_weight})\n precision, recall, f1 = EvaluateModelTest(cfg, model_trained, test_features, test_targets)\n\n if recall > best_recall:\n best_precision = precision\n best_recall = recall\n best_f1 = f1\n best_architecture = list(architecture)\n best_regularizer = regularizer\n best_activation_function = activation_function\n\n la1 = list(architecture)[0]\n la2 = 0\n la3 = 0\n la4 = 0\n la5 = 0\n\n\n if len(list(architecture)) >= 2:\n la2 = list(architecture)[1]\n if len(list(architecture)) >= 3:\n la3 = list(architecture)[2]\n if len(list(architecture)) >= 4:\n la4 = 
list(architecture)[3]\n if len(list(architecture)) >= 5:\n la5 = list(architecture)[4]\n\n f.write(str(la1) + ',' + str(la2) + ',' + str(la3) + ',' + str(la4) + ',' + str(la5) + ',' + str(class_weight) + ',' + regularizer + ',' + activation_function + ',' + str(precision) + ',' + str(recall) + ',' + str(f1) + '\\n')\n\n counter += 1\n\n print('BEST ARCHITECTURE:')\n print(best_architecture)\n print(best_regularizer)\n print(best_activation_function)\n print('precision: ' + str(best_precision) + ', recall: ' + str(best_recall) + ', f1: ' + str(best_f1))\n\n\nelse:\n reset_keras()\n model = BuildModel(cfg, input_shape, False, 0, 0, 0)\n model = TrainModel(cfg, model, training_features, training_targets, cfg.CLASS_WEIGHT)\n EvaluateModel(cfg, model, test_features, test_targets)\n\n\n\n\n", "step-ids": [ 5, 6, 7, 8, 10 ] }
[ 5, 6, 7, 8, 10 ]
import inspect import threading from monitor.mutex import Mutex, mutex_hooks from monitor.condition import Condition, condition_hooks from monitor.shared_variables import SharedList, SharedDict, shared_auto, \ variable_hooks hooks = {} for h in [mutex_hooks, condition_hooks, variable_hooks]: hooks.update(h) def method_decorator(method): def wrapped(self, *args, **kwargs): # print(self, *args, **kwargs) self._mutex.acquire() for var in self._variables: var.apply_pending_changes() value = method(self, *args, **kwargs) for var in self._variables: var.sync() self._mutex.release() return value return wrapped class MonitorMeta(type): def __init__(cls, name, bases, attrs): super(MonitorMeta, cls).__init__(name, bases, attrs) for name, method in inspect.getmembers(cls, predicate=inspect.isfunction): if name not in ['wait', 'signal', 'register', 'shared', 'condition', '__init__', '__new__']: setattr(cls, name, method_decorator(method)) class ConditionWrapper: def __init__(self, condition, monitor): self.condition = condition self.monitor = monitor def wait(self): for var in self.monitor._variables: var.sync() self.condition.wait() for var in self.monitor._variables: var.apply_pending_changes() def signal(self): self.condition.signal() class MonitorBase(object, metaclass=MonitorMeta): _monitor_counter = 0 _variable_counter = 0 _condition_counter = 0 def __new__(cls, *args, **kwargs): obj = super(MonitorBase, cls).__new__(cls, *args, **kwargs) cls._monitor_counter += 1 mutex_name = 'mutex-{}-{}'.format(cls.__name__, cls._monitor_counter) obj._mutex = Mutex(mutex_name) obj._variables = [] return obj def wait(self, condition): condition.wait() def signal(self, condition): condition.signal() def register(self, variables): self._variables.extend(variables) def shared(self, data): self.__class__._variable_counter += 1 name = 'variable-{}-{}'.format(self.__class__.__name__, self.__class__._variable_counter) var = shared_auto(name, data) self._variables.append(var) return var def 
condition(self): self.__class__._condition_counter += 1 name = 'condition-{}-{}'.format(self.__class__.__name__, self.__class__._condition_counter) c = ConditionWrapper(Condition(self._mutex, name), self) return c class Monitor(MonitorBase): def __init__(self): # self.s1 = SharedList('s1', [1,2,3]) # self.register([self.s1]) self.s1 = self.shared([1,2,3]) self.c = self.condition() def test(self): self.wait("aaa") print("test") self.signal("aaa") return 1 def abc(self): print("abc") return 2 def seq(self): for i in range(10): print(rank, i) def list_append(self, elem): self.s1.append(elem) def list_print(self): print(self.s1) if __name__ == '__main__': import time from monitor.main import event_loop, send_exit m = Monitor() event_loop_thread = threading.Thread(target=event_loop, args=(hooks,)) event_loop_thread.start() # print(m._mutex) # while True: # m.seq() m.list_append(5) time.sleep(1) m.list_print() send_exit() event_loop_thread.join()
normal
{ "blob_id": "80d49b24a2233569a340cee918393b1663c3d55d", "index": 4598, "step-1": "<mask token>\n\n\nclass ConditionWrapper:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MonitorBase(object, metaclass=MonitorMeta):\n _monitor_counter = 0\n _variable_counter = 0\n _condition_counter = 0\n\n def __new__(cls, *args, **kwargs):\n obj = super(MonitorBase, cls).__new__(cls, *args, **kwargs)\n cls._monitor_counter += 1\n mutex_name = 'mutex-{}-{}'.format(cls.__name__, cls._monitor_counter)\n obj._mutex = Mutex(mutex_name)\n obj._variables = []\n return obj\n\n def wait(self, condition):\n condition.wait()\n\n def signal(self, condition):\n condition.signal()\n\n def register(self, variables):\n self._variables.extend(variables)\n\n def shared(self, data):\n self.__class__._variable_counter += 1\n name = 'variable-{}-{}'.format(self.__class__.__name__, self.\n __class__._variable_counter)\n var = shared_auto(name, data)\n self._variables.append(var)\n return var\n\n def condition(self):\n self.__class__._condition_counter += 1\n name = 'condition-{}-{}'.format(self.__class__.__name__, self.\n __class__._condition_counter)\n c = ConditionWrapper(Condition(self._mutex, name), self)\n return c\n\n\nclass Monitor(MonitorBase):\n\n def __init__(self):\n self.s1 = self.shared([1, 2, 3])\n self.c = self.condition()\n\n def test(self):\n self.wait('aaa')\n print('test')\n self.signal('aaa')\n return 1\n\n def abc(self):\n print('abc')\n return 2\n\n def seq(self):\n for i in range(10):\n print(rank, i)\n\n def list_append(self, elem):\n self.s1.append(elem)\n\n def list_print(self):\n print(self.s1)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ConditionWrapper:\n\n def __init__(self, condition, monitor):\n self.condition = condition\n self.monitor = monitor\n <mask token>\n <mask token>\n\n\nclass MonitorBase(object, metaclass=MonitorMeta):\n _monitor_counter = 0\n _variable_counter = 0\n _condition_counter = 0\n\n def __new__(cls, *args, **kwargs):\n obj = 
super(MonitorBase, cls).__new__(cls, *args, **kwargs)\n cls._monitor_counter += 1\n mutex_name = 'mutex-{}-{}'.format(cls.__name__, cls._monitor_counter)\n obj._mutex = Mutex(mutex_name)\n obj._variables = []\n return obj\n\n def wait(self, condition):\n condition.wait()\n\n def signal(self, condition):\n condition.signal()\n\n def register(self, variables):\n self._variables.extend(variables)\n\n def shared(self, data):\n self.__class__._variable_counter += 1\n name = 'variable-{}-{}'.format(self.__class__.__name__, self.\n __class__._variable_counter)\n var = shared_auto(name, data)\n self._variables.append(var)\n return var\n\n def condition(self):\n self.__class__._condition_counter += 1\n name = 'condition-{}-{}'.format(self.__class__.__name__, self.\n __class__._condition_counter)\n c = ConditionWrapper(Condition(self._mutex, name), self)\n return c\n\n\nclass Monitor(MonitorBase):\n\n def __init__(self):\n self.s1 = self.shared([1, 2, 3])\n self.c = self.condition()\n\n def test(self):\n self.wait('aaa')\n print('test')\n self.signal('aaa')\n return 1\n\n def abc(self):\n print('abc')\n return 2\n\n def seq(self):\n for i in range(10):\n print(rank, i)\n\n def list_append(self, elem):\n self.s1.append(elem)\n\n def list_print(self):\n print(self.s1)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ConditionWrapper:\n\n def __init__(self, condition, monitor):\n self.condition = condition\n self.monitor = monitor\n <mask token>\n\n def signal(self):\n self.condition.signal()\n\n\nclass MonitorBase(object, metaclass=MonitorMeta):\n _monitor_counter = 0\n _variable_counter = 0\n _condition_counter = 0\n\n def __new__(cls, *args, **kwargs):\n obj = super(MonitorBase, cls).__new__(cls, *args, **kwargs)\n cls._monitor_counter += 1\n mutex_name = 'mutex-{}-{}'.format(cls.__name__, cls._monitor_counter)\n obj._mutex = Mutex(mutex_name)\n obj._variables = []\n return obj\n\n def wait(self, condition):\n condition.wait()\n\n def signal(self, condition):\n 
condition.signal()\n\n def register(self, variables):\n self._variables.extend(variables)\n\n def shared(self, data):\n self.__class__._variable_counter += 1\n name = 'variable-{}-{}'.format(self.__class__.__name__, self.\n __class__._variable_counter)\n var = shared_auto(name, data)\n self._variables.append(var)\n return var\n\n def condition(self):\n self.__class__._condition_counter += 1\n name = 'condition-{}-{}'.format(self.__class__.__name__, self.\n __class__._condition_counter)\n c = ConditionWrapper(Condition(self._mutex, name), self)\n return c\n\n\nclass Monitor(MonitorBase):\n\n def __init__(self):\n self.s1 = self.shared([1, 2, 3])\n self.c = self.condition()\n\n def test(self):\n self.wait('aaa')\n print('test')\n self.signal('aaa')\n return 1\n\n def abc(self):\n print('abc')\n return 2\n\n def seq(self):\n for i in range(10):\n print(rank, i)\n\n def list_append(self, elem):\n self.s1.append(elem)\n\n def list_print(self):\n print(self.s1)\n\n\n<mask token>\n", "step-4": "<mask token>\nfor h in [mutex_hooks, condition_hooks, variable_hooks]:\n hooks.update(h)\n\n\ndef method_decorator(method):\n\n def wrapped(self, *args, **kwargs):\n self._mutex.acquire()\n for var in self._variables:\n var.apply_pending_changes()\n value = method(self, *args, **kwargs)\n for var in self._variables:\n var.sync()\n self._mutex.release()\n return value\n return wrapped\n\n\nclass MonitorMeta(type):\n\n def __init__(cls, name, bases, attrs):\n super(MonitorMeta, cls).__init__(name, bases, attrs)\n for name, method in inspect.getmembers(cls, predicate=inspect.\n isfunction):\n if name not in ['wait', 'signal', 'register', 'shared',\n 'condition', '__init__', '__new__']:\n setattr(cls, name, method_decorator(method))\n\n\nclass ConditionWrapper:\n\n def __init__(self, condition, monitor):\n self.condition = condition\n self.monitor = monitor\n\n def wait(self):\n for var in self.monitor._variables:\n var.sync()\n self.condition.wait()\n for var in 
self.monitor._variables:\n var.apply_pending_changes()\n\n def signal(self):\n self.condition.signal()\n\n\nclass MonitorBase(object, metaclass=MonitorMeta):\n _monitor_counter = 0\n _variable_counter = 0\n _condition_counter = 0\n\n def __new__(cls, *args, **kwargs):\n obj = super(MonitorBase, cls).__new__(cls, *args, **kwargs)\n cls._monitor_counter += 1\n mutex_name = 'mutex-{}-{}'.format(cls.__name__, cls._monitor_counter)\n obj._mutex = Mutex(mutex_name)\n obj._variables = []\n return obj\n\n def wait(self, condition):\n condition.wait()\n\n def signal(self, condition):\n condition.signal()\n\n def register(self, variables):\n self._variables.extend(variables)\n\n def shared(self, data):\n self.__class__._variable_counter += 1\n name = 'variable-{}-{}'.format(self.__class__.__name__, self.\n __class__._variable_counter)\n var = shared_auto(name, data)\n self._variables.append(var)\n return var\n\n def condition(self):\n self.__class__._condition_counter += 1\n name = 'condition-{}-{}'.format(self.__class__.__name__, self.\n __class__._condition_counter)\n c = ConditionWrapper(Condition(self._mutex, name), self)\n return c\n\n\nclass Monitor(MonitorBase):\n\n def __init__(self):\n self.s1 = self.shared([1, 2, 3])\n self.c = self.condition()\n\n def test(self):\n self.wait('aaa')\n print('test')\n self.signal('aaa')\n return 1\n\n def abc(self):\n print('abc')\n return 2\n\n def seq(self):\n for i in range(10):\n print(rank, i)\n\n def list_append(self, elem):\n self.s1.append(elem)\n\n def list_print(self):\n print(self.s1)\n\n\nif __name__ == '__main__':\n import time\n from monitor.main import event_loop, send_exit\n m = Monitor()\n event_loop_thread = threading.Thread(target=event_loop, args=(hooks,))\n event_loop_thread.start()\n m.list_append(5)\n time.sleep(1)\n m.list_print()\n send_exit()\n event_loop_thread.join()\n", "step-5": "import inspect\nimport threading\n\nfrom monitor.mutex import Mutex, mutex_hooks\nfrom monitor.condition import Condition, 
condition_hooks\nfrom monitor.shared_variables import SharedList, SharedDict, shared_auto, \\\n variable_hooks\n\nhooks = {}\nfor h in [mutex_hooks, condition_hooks, variable_hooks]:\n hooks.update(h)\n\ndef method_decorator(method):\n def wrapped(self, *args, **kwargs):\n # print(self, *args, **kwargs)\n self._mutex.acquire()\n for var in self._variables:\n var.apply_pending_changes()\n value = method(self, *args, **kwargs)\n for var in self._variables:\n var.sync()\n self._mutex.release()\n return value\n return wrapped\n\nclass MonitorMeta(type):\n def __init__(cls, name, bases, attrs):\n super(MonitorMeta, cls).__init__(name, bases, attrs)\n for name, method in inspect.getmembers(cls, predicate=inspect.isfunction):\n if name not in ['wait', 'signal', 'register', 'shared',\n 'condition', '__init__', '__new__']:\n setattr(cls, name, method_decorator(method))\n\nclass ConditionWrapper:\n def __init__(self, condition, monitor):\n self.condition = condition\n self.monitor = monitor\n\n def wait(self):\n for var in self.monitor._variables:\n var.sync()\n self.condition.wait()\n for var in self.monitor._variables:\n var.apply_pending_changes()\n\n def signal(self):\n self.condition.signal()\n\nclass MonitorBase(object, metaclass=MonitorMeta):\n _monitor_counter = 0\n _variable_counter = 0\n _condition_counter = 0\n def __new__(cls, *args, **kwargs):\n obj = super(MonitorBase, cls).__new__(cls, *args, **kwargs)\n cls._monitor_counter += 1\n mutex_name = 'mutex-{}-{}'.format(cls.__name__, cls._monitor_counter)\n obj._mutex = Mutex(mutex_name)\n obj._variables = []\n return obj\n\n def wait(self, condition):\n condition.wait()\n\n def signal(self, condition):\n condition.signal()\n\n def register(self, variables):\n self._variables.extend(variables)\n\n def shared(self, data):\n self.__class__._variable_counter += 1\n name = 'variable-{}-{}'.format(self.__class__.__name__, self.__class__._variable_counter)\n var = shared_auto(name, data)\n self._variables.append(var)\n 
return var\n\n def condition(self):\n self.__class__._condition_counter += 1\n name = 'condition-{}-{}'.format(self.__class__.__name__, self.__class__._condition_counter)\n c = ConditionWrapper(Condition(self._mutex, name), self)\n return c\n\nclass Monitor(MonitorBase):\n def __init__(self):\n # self.s1 = SharedList('s1', [1,2,3])\n # self.register([self.s1])\n self.s1 = self.shared([1,2,3])\n self.c = self.condition()\n\n def test(self):\n self.wait(\"aaa\")\n print(\"test\")\n self.signal(\"aaa\")\n return 1\n\n def abc(self):\n print(\"abc\")\n return 2\n\n def seq(self):\n for i in range(10):\n print(rank, i)\n\n def list_append(self, elem):\n self.s1.append(elem)\n\n def list_print(self):\n print(self.s1)\n\nif __name__ == '__main__':\n import time\n\n from monitor.main import event_loop, send_exit\n\n m = Monitor()\n\n event_loop_thread = threading.Thread(target=event_loop, args=(hooks,))\n event_loop_thread.start()\n\n # print(m._mutex)\n # while True:\n # m.seq()\n m.list_append(5)\n time.sleep(1)\n m.list_print()\n\n send_exit()\n event_loop_thread.join()\n", "step-ids": [ 16, 17, 18, 23, 26 ] }
[ 16, 17, 18, 23, 26 ]
import pandas as pd from pymongo import MongoClient import numpy as np mongo_client = MongoClient('localhost', 27018) mongo_db = mongo_client['ProjetoIN242'] mongo_collection = mongo_db['contadorpessoas'] query = mongo_collection.find({}) df = pd.DataFrame.from_records(query) df_filtro = df[['Entrada','Dia', 'Quantidade de pessoas']] ##seleção de colunas df_filtro.groupby('Dia')['Quantidade de pessoas'].mean().plot(x='Dia', y= 'Quantidade de pessoas')
normal
{ "blob_id": "9d4559a363c4fd6f9a22dc493a7aaa0a22386c21", "index": 8071, "step-1": "<mask token>\n", "step-2": "<mask token>\ndf_filtro.groupby('Dia')['Quantidade de pessoas'].mean().plot(x='Dia', y=\n 'Quantidade de pessoas')\n", "step-3": "<mask token>\nmongo_client = MongoClient('localhost', 27018)\nmongo_db = mongo_client['ProjetoIN242']\nmongo_collection = mongo_db['contadorpessoas']\nquery = mongo_collection.find({})\ndf = pd.DataFrame.from_records(query)\ndf_filtro = df[['Entrada', 'Dia', 'Quantidade de pessoas']]\ndf_filtro.groupby('Dia')['Quantidade de pessoas'].mean().plot(x='Dia', y=\n 'Quantidade de pessoas')\n", "step-4": "import pandas as pd\nfrom pymongo import MongoClient\nimport numpy as np\nmongo_client = MongoClient('localhost', 27018)\nmongo_db = mongo_client['ProjetoIN242']\nmongo_collection = mongo_db['contadorpessoas']\nquery = mongo_collection.find({})\ndf = pd.DataFrame.from_records(query)\ndf_filtro = df[['Entrada', 'Dia', 'Quantidade de pessoas']]\ndf_filtro.groupby('Dia')['Quantidade de pessoas'].mean().plot(x='Dia', y=\n 'Quantidade de pessoas')\n", "step-5": "import pandas as pd\nfrom pymongo import MongoClient\nimport numpy as np\n\nmongo_client = MongoClient('localhost', 27018)\nmongo_db = mongo_client['ProjetoIN242']\nmongo_collection = mongo_db['contadorpessoas']\n\nquery = mongo_collection.find({})\n\ndf = pd.DataFrame.from_records(query)\n\ndf_filtro = df[['Entrada','Dia', 'Quantidade de pessoas']] ##seleção de colunas\n\ndf_filtro.groupby('Dia')['Quantidade de pessoas'].mean().plot(x='Dia', y= 'Quantidade de pessoas')\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from config import Config from flask import Flask from flask_cors import CORS from flask_migrate import Migrate from flask_sqlalchemy import SQLAlchemy app = Flask(__name__) CORS(app) app.config.from_object(Config) app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db' # app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://api:uyLmQ5M1AjCvm1R2@localhost/ws' app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False db = SQLAlchemy(app) migrate = Migrate(app, db) from ws import routes
normal
{ "blob_id": "f494d8aeee8c72cce8fc14e44ca896bcf30c100a", "index": 5627, "step-1": "<mask token>\n", "step-2": "<mask token>\nCORS(app)\napp.config.from_object(Config)\n<mask token>\n", "step-3": "<mask token>\napp = Flask(__name__)\nCORS(app)\napp.config.from_object(Config)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n<mask token>\n", "step-4": "from config import Config\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\napp = Flask(__name__)\nCORS(app)\napp.config.from_object(Config)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\nfrom ws import routes\n", "step-5": "from config import Config\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\nCORS(app)\napp.config.from_object(Config)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ws.db'\n# app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://api:uyLmQ5M1AjCvm1R2@localhost/ws'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\n\nfrom ws import routes\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import secrets from pathlib import Path HASHCAT_WPA_CACHE_DIR = Path.home() / ".hashcat" / "wpa-server" ROOT_PRIVATE_DIR = Path(__file__).parent.parent WORDLISTS_DIR = ROOT_PRIVATE_DIR / "wordlists" WORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / "wordlists" # user custom wordlists RULES_DIR = ROOT_PRIVATE_DIR / "rules" MASKS_DIR = ROOT_PRIVATE_DIR / "masks" LOGS_DIR = ROOT_PRIVATE_DIR / "logs" DATABASE_DIR = HASHCAT_WPA_CACHE_DIR / "database" ESSID_TRIED = DATABASE_DIR / "essid_tried" DATABASE_PATH = DATABASE_DIR / "hashcat_wpa.db" # Hashcat HASHCAT_STATUS_TIMER = 20 # seconds BENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / "benchmark.csv" HASHCAT_BRAIN_PASSWORD_PATH = HASHCAT_WPA_CACHE_DIR / "brain" / "hashcat_brain_password" # mkdirs HASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True) WORDLISTS_USER_DIR.mkdir(exist_ok=True) LOGS_DIR.mkdir(exist_ok=True) DATABASE_DIR.mkdir(exist_ok=True) HASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True) class Config: """ Flask application config """ SECRET_KEY = secrets.token_bytes(64) # Flask-SQLAlchemy settings SQLALCHEMY_DATABASE_URI = "sqlite:///{}".format(DATABASE_PATH) SQLALCHEMY_TRACK_MODIFICATIONS = False # Airodump capture files CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / "captures"
normal
{ "blob_id": "20d480517226cb7fbced765554a02fa5cbc29033", "index": 6491, "step-1": "<mask token>\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-2": "<mask token>\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-3": "<mask token>\nHASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server'\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists'\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists'\nRULES_DIR = ROOT_PRIVATE_DIR / 'rules'\nMASKS_DIR = ROOT_PRIVATE_DIR / 'masks'\nLOGS_DIR = ROOT_PRIVATE_DIR / 'logs'\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database'\nESSID_TRIED = DATABASE_DIR / 'essid_tried'\nDATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db'\nHASHCAT_STATUS_TIMER = 20\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv'\nHASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' /\n 'hashcat_brain_password')\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = 
HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-4": "import secrets\nfrom pathlib import Path\nHASHCAT_WPA_CACHE_DIR = Path.home() / '.hashcat' / 'wpa-server'\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / 'wordlists'\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / 'wordlists'\nRULES_DIR = ROOT_PRIVATE_DIR / 'rules'\nMASKS_DIR = ROOT_PRIVATE_DIR / 'masks'\nLOGS_DIR = ROOT_PRIVATE_DIR / 'logs'\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / 'database'\nESSID_TRIED = DATABASE_DIR / 'essid_tried'\nDATABASE_PATH = DATABASE_DIR / 'hashcat_wpa.db'\nHASHCAT_STATUS_TIMER = 20\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / 'benchmark.csv'\nHASHCAT_BRAIN_PASSWORD_PATH = (HASHCAT_WPA_CACHE_DIR / 'brain' /\n 'hashcat_brain_password')\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n SECRET_KEY = secrets.token_bytes(64)\n SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / 'captures'\n", "step-5": "import secrets\nfrom pathlib import Path\n\nHASHCAT_WPA_CACHE_DIR = Path.home() / \".hashcat\" / \"wpa-server\"\nROOT_PRIVATE_DIR = Path(__file__).parent.parent\n\nWORDLISTS_DIR = ROOT_PRIVATE_DIR / \"wordlists\"\nWORDLISTS_USER_DIR = HASHCAT_WPA_CACHE_DIR / \"wordlists\" # user custom wordlists\nRULES_DIR = ROOT_PRIVATE_DIR / \"rules\"\nMASKS_DIR = ROOT_PRIVATE_DIR / \"masks\"\nLOGS_DIR = ROOT_PRIVATE_DIR / \"logs\"\n\nDATABASE_DIR = HASHCAT_WPA_CACHE_DIR / \"database\"\nESSID_TRIED = DATABASE_DIR / \"essid_tried\"\nDATABASE_PATH = DATABASE_DIR / \"hashcat_wpa.db\"\n\n# Hashcat\nHASHCAT_STATUS_TIMER = 20 # seconds\nBENCHMARK_FILE = HASHCAT_WPA_CACHE_DIR / \"benchmark.csv\"\nHASHCAT_BRAIN_PASSWORD_PATH = HASHCAT_WPA_CACHE_DIR / \"brain\" / 
\"hashcat_brain_password\"\n\n# mkdirs\nHASHCAT_WPA_CACHE_DIR.mkdir(exist_ok=True, parents=True)\nWORDLISTS_USER_DIR.mkdir(exist_ok=True)\nLOGS_DIR.mkdir(exist_ok=True)\nDATABASE_DIR.mkdir(exist_ok=True)\nHASHCAT_BRAIN_PASSWORD_PATH.parent.mkdir(exist_ok=True)\n\nclass Config:\n \"\"\" Flask application config \"\"\"\n\n SECRET_KEY = secrets.token_bytes(64)\n\n # Flask-SQLAlchemy settings\n SQLALCHEMY_DATABASE_URI = \"sqlite:///{}\".format(DATABASE_PATH)\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n\n # Airodump capture files\n CAPTURES_DIR = HASHCAT_WPA_CACHE_DIR / \"captures\"\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# author Dominik Capkovic # contact: [email protected]; https://www.linkedin.com/in/dominik-čapkovič-b0ab8575/ # GitHub: https://github.com/kilimetr packings_str = ''' Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450 Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430 Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440 Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400 Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300 Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337 Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360 Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320 Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336 Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341 Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410 Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446 Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380 Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368 Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333 Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336 Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341 Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345 Ralu ring | plastic | 25.0 | 
36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333 Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333 Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303 NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410 NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366 NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425 NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322 Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402 Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345 Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408 Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390 Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342 Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369 Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465 Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464 Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379 Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450 Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398 Glitsch CMR ring | metal | 0.5" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495 Glitsch CMR ring | metal | 1.0" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0 Glitsch CMR ring | metal | 1.5"T | 63547 | 188.0 | 
0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0 Glitsch CMR ring | metal | 1.5" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0 TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389 Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412 Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210 VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405 VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420 Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459 Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296 Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257 Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331 Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390 Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377 Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302 Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0 Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0 Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370 Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232 Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387 DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354 DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326 Ralu pak | 
metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385 Mellapak | metal  | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0 Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0 Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270 Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327 Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390 Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422 Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412 Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0 Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167 ''' packings = [] for line in packings_str.strip().splitlines(): line_items = line.split(" | ") line_items = [s.strip() for s in line_items] name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items packings.append({ 'name': name, 'material': material, 'size': size, 'N': int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl': float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL), 'CV': float(CV), }) # EXPORTING PACKING NAME seen_packing_name = set() export_packing_name = [] for i in range(len(packings)): if packings[i]["name"] not in seen_packing_name: seen_packing_name.add(packings[i]["name"]) export_packing_name.append(packings[i]["name"]) else: pass # # EXPORT PACKING SURFACEAREA # export_packing_surfacearea = [] # for item in packings: # if item["name"] == type_packing: # export_packing_surfacearea.append(item["a"]) # print(export_packing_surfacearea)
normal
{ "blob_id": "c4f656b96ddc86ab2575bd5ec646833cce95e6a9", "index": 1717, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor line in packings_str.strip().splitlines():\n line_items = line.split(' | ')\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({'name': name, 'material': material, 'size': size, 'N':\n int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl':\n float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL),\n 'CV': float(CV)})\n<mask token>\nfor i in range(len(packings)):\n if packings[i]['name'] not in seen_packing_name:\n seen_packing_name.add(packings[i]['name'])\n export_packing_name.append(packings[i]['name'])\n else:\n pass\n", "step-3": "packings_str = \"\"\"\n Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450\n Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430\n Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440\n Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400\n Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300\n Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337\n Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360\n Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320\n Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336\n Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341\n Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410\n Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 
2.064 | 0.528 | 0.865 | 0.905 | 0.446\n Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380\n Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 0.593 | 0.698 | 1.239 | 0.368\n Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333\n Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336\n Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341\n Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345\n Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333\n Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333\n Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303\n NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410\n NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366\n NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425\n NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322\n Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402\n Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345\n Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408\n Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390\n Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342\n Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369\n Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 
0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465\n Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464\n Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 | 1.694 | 0 | 0.538 | 1.377 | 0.379\n Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450\n Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398\n Glitsch CMR ring | metal | 0.5\" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495\n Glitsch CMR ring | metal | 1.0\" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0\n Glitsch CMR ring | metal | 1.5\"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0\n Glitsch CMR ring | metal | 1.5\" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0\n TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389\n Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412\n Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210\n VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405\n VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420\n Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459\n Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296\n Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257\n Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331\n Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390\n Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377\n Bialecki ring | metal | 50.0 | 6278 
| 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302\n Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0\n Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 | 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0\n Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370\n Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232\n Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387\n DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354\n DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326\n Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385\n Mellapak | metal  | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0\n Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0\n Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270\n Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327\n Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390\n Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422\n Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412\n Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0\n Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167\n\"\"\"\npackings = []\nfor line in packings_str.strip().splitlines():\n line_items = line.split(' | ')\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({'name': name, 'material': material, 'size': size, 'N':\n 
int(N), 'a': float(a), 'eps': float(eps), 'CS': float(CS), 'CFl':\n float(CFl), 'Ch': float(Ch), 'CP0': float(CP0), 'CL': float(CL),\n 'CV': float(CV)})\nseen_packing_name = set()\nexport_packing_name = []\nfor i in range(len(packings)):\n if packings[i]['name'] not in seen_packing_name:\n seen_packing_name.add(packings[i]['name'])\n export_packing_name.append(packings[i]['name'])\n else:\n pass\n", "step-4": "# author Dominik Capkovic \n# contact: [email protected]; https://www.linkedin.com/in/dominik-čapkovič-b0ab8575/\n# GitHub: https://github.com/kilimetr\n\n\npackings_str = '''\n Raschig Super-Ring | metal | 0.3 | 180000 | 315.0 | 0.960 | 3.560 | 2.340 | 0.750 | 0.760 | 1.500 | 0.450\n Raschig Super-Ring | metal | 0.5 | 145000 | 250.0 | 0.975 | 3.350 | 2.200 | 0.620 | 0.780 | 1.450 | 0.430\n Raschig Super-Ring | metal | 1.0 | 32000 | 160.0 | 0.980 | 3.491 | 2.200 | 0.750 | 0.500 | 1.290 | 0.440\n Raschig Super-Ring | metal | 2.0 | 9500 | 97.6 | 0.985 | 3.326 | 2.096 | 0.720 | 0.464 | 1.323 | 0.400\n Raschig Super-Ring | metal | 3.0 | 4300 | 80.0 | 0.982 | 3.260 | 2.100 | 0.620 | 0.430 | 0.850 | 0.300\n Raschig Super-Ring | plastic | 2.0 | 9000 | 100.0 | 0.960 | 3.326 | 2.096 | 0.720 | 0.377 | 1.250 | 0.337\n Ralu Flow | plastic | 1.0 | 33000 | 165.0 | 0.940 | 3.612 | 2.401 | 0.640 | 0.485 | 1.486 | 0.360\n Ralu Flow | plastic | 2.0 | 4600 | 100.0 | 0.945 | 3.412 | 2.174 | 0.640 | 0.350 | 1.270 | 0.320\n Pall ring | metal | 25.0 | 53900 | 223.5 | 0.954 | 2.627 | 2.083 | 0.719 | 0.957 | 1.440 | 0.336\n Pall ring | metal | 35.0 | 19517 | 139.4 | 0.965 | 2.629 | 1.679 | 0.644 | 0.967 | 1.012 | 0.341\n Pall ring | metal | 50.0 | 6242 | 112.6 | 0.951 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.410\n Pall ring | plastic | 25.0 | 52300 | 225.0 | 0.887 | 2.696 | 2.064 | 0.528 | 0.865 | 0.905 | 0.446\n Pall ring | plastic | 35.0 | 17000 | 151.1 | 0.906 | 2.654 | 1.742 | 0.718 | 0.927 | 0.856 | 0.380\n Pall ring | plastic | 50.0 | 6765 | 111.1 | 0.919 | 2.816 | 1.757 | 
0.593 | 0.698 | 1.239 | 0.368\n Pall ring | ceramic | 50.0 | 7502 | 155.2 | 0.754 | 3.793 | 3.024 | 1.006 | 0.233 | 1.278 | 0.333\n Ralu ring | metal | 25.0 | 51000 | 215.0 | 0.960 | 2.627 | 2.083 | 0.714 | 0.957 | 1.440 | 0.336\n Ralu ring | metal | 38.0 | 14500 | 135.0 | 0.965 | 2.629 | 1.679 | 0.644 | 1.003 | 1.277 | 0.341\n Ralu ring | metal | 50.0 | 6300 | 105.0 | 0.975 | 2.725 | 1.580 | 0.784 | 0.763 | 1.192 | 0.345\n Ralu ring | plastic | 25.0 | 36000 | 190.0 | 0.940 | 2.841 | 1.989 | 0.719 | 0.800 | 1.320 | 0.333\n Ralu ring | plastic | 38.0 | 13500 | 150.0 | 0.930 | 2.843 | 1.812 | 0.640 | 0.672 | 1.320 | 0.333\n Ralu ring | plastic | 50.0 | 5770 | 95.2 | 0.983 | 2.843 | 1.812 | 0.640 | 0.468 | 1.520 | 0.303\n NOR PAC ring | plastic | 25.0 | 48920 | 197.9 | 0.920 | 2.865 | 2.083 | 0 | 0.383 | 0.976 | 0.410\n NOR PAC ring | plastic | 25.0 | 50000 | 202.0 | 0.953 | 3.277 | 2.472 | 0.601 | 0.397 | 0.883 | 0.366\n NOR PAC ring | plastic | 35.0 | 17450 | 141.8 | 0.944 | 3.179 | 2.242 | 0.587 | 0.371 | 0.756 | 0.425\n NOR PAC ring | plastic | 50.0 | 7330 | 86.8 | 0.947 | 2.959 | 1.786 | 0.651 | 0.350 | 1.080 | 0.322\n Hiflow-ring | metal | 25.0 | 40790 | 202.9 | 0.962 | 2.918 | 2.177 | 0.799 | 0.689 | 1.641 | 0.402\n Hiflow-ring | metal | 50.0 | 6815 | 117.1 | 0.925 | 2.894 | 1.871 | 1.038 | 0.327 | 1.478 | 0.345\n Hiflow-ring | metal | 50.0 | 5000 | 92.3 | 0.977 | 2.702 | 1.626 | 0.876 | 0.421 | 1.168 | 0.408\n Hiflow-ring | plastic | 25.0 | 46100 | 194.5 | 0.918 | 2.841 | 1.989 | 0 | 0.741 | 1.577 | 0.390\n Hiflow-ring | plastic | 50S | 6050 | 82.0 | 0.942 | 2.866 | 1.702 | 0.881 | 0.414 | 1.219 | 0.342\n Hiflow-ring | plastic | 50hydr | 6890 | 118.4 | 0.925 | 2.894 | 1.871 | 0 | 0.311 | 1.553 | 0.369\n Hiflow-ring | ceramic | 20.0 | 121314 | 286.2 | 0.758 | 2.875 | 2.410 | 1.167 | 0.628 | 1.744 | 0.465\n Hiflow-ring | ceramic | 38.0 | 13241 | 111.8 | 0.788 | 2.840 | 1.930 | 0 | 0.621 | 1.659 | 0.464\n Hiflow-ring | ceramic | 50.0 | 5120 | 89.7 | 0.809 | 2.819 
| 1.694 | 0 | 0.538 | 1.377 | 0.379\n Glitsch Ring | metal | 30PMK | 29200 | 180.5 | 0.975 | 2.694 | 1.900 | 0.930 | 0.851 | 1.920 | 0.450\n Glitsch Ring | metal | 30P | 31100 | 164.0 | 0.959 | 2.564 | 1.760 | 0.851 | 1.056 | 1.577 | 0.398\n Glitsch CMR ring | metal | 0.5\" | 560811 | 356.0 | 0.952 | 2.644 | 2.178 | 0 | 0.882 | 2.038 | 0.495\n Glitsch CMR ring | metal | 1.0\" | 158467 | 232.5 | 0.971 | 2.703 | 1.996 | 1.040 | 0.641 | 0 | 0\n Glitsch CMR ring | metal | 1.5\"T | 63547 | 188.0 | 0.972 | 2.790 | 1.870 | 0.870 | 0.627 | 0 | 0\n Glitsch CMR ring | metal | 1.5\" | 60744 | 174.9 | 0.974 | 2.697 | 1.841 | 0.935 | 0.632 | 0 | 0\n TOP Pak ring | alu | 50.0 | 6871 | 105.5 | 0.956 | 2.528 | 1.579 | 0.881 | 0.604 | 1.326 | 0.389\n Raschig ring | ceramic | 25.0 | 47700 | 190.0 | 0.680 | 2.454 | 1.899 | 0.577 | 1.329 | 1.361 | 0.412\n Raschig ring | ceramic | 50.0 | 5990 | 95.0 | 0.830 | 2.482 | 1.547 | 0 | 0 | 1.416 | 0.210\n VSP ring | metal | 25.0 | 33434 | 199.6 | 0.975 | 2.755 | 1.970 | 1.369 | 0.782 | 1.376 | 0.405\n VSP ring | metal | 50.0 | 7841 | 104.6 | 0.980 | 2.806 | 1.689 | 1.135 | 0.773 | 1.222 | 0.420\n Envi Pac ring | plastic | 32.0 | 53000 | 138.9 | 0.936 | 2.944 | 2.012 | 1.039 | 0.549 | 1.517 | 0.459\n Envi Pac ring | plastic | 60.0 | 6800 | 98.4 | 0.961 | 2.987 | 1.864 | 0.794 | 0.338 | 1.522 | 0.296\n Envi Pac ring | plastic | 80.0 | 2000 | 60.0 | 0.955 | 2.846 | 1.522 | 0.641 | 0.358 | 1.603 | 0.257\n Bialecki ring | metal | 25.0 | 48533 | 210.0 | 0.956 | 2.521 | 1.856 | 0.692 | 0.891 | 1.461 | 0.331\n Bialecki ring | metal | 35.0 | 18200 | 155.0 | 0.967 | 2.753 | 1.885 | 0.787 | 1.011 | 1.412 | 0.390\n Bialecki ring | metal | 35.0 | 20736 | 176.6 | 0.945 | 0 | 0 | 0.690 | 0.460 | 1.405 | 0.377\n Bialecki ring | metal | 50.0 | 6278 | 121.0 | 0.966 | 2.916 | 1.896 | 0.798 | 0.719 | 1.721 | 0.302\n Tellerette | plastic | 25.0 | 37037 | 190.0 | 0.930 | 2.913 | 2.132 | 0.588 | 0.538 | 0.899 | 0\n Hackette | plastic | 45.0 | 12000 | 139.5 | 0.928 
| 2.832 | 1.966 | 0.643 | 0.399 | 0 | 0\n Raflux ring | plastic | 15.0 | 193522 | 307.9 | 0.894 | 2.825 | 2.400 | 0.491 | 0.595 | 1.913 | 0.370\n Berl saddle | ceramic | 13.0 | 691505 | 545.0 | 0.650 | 0 | 0 | 0.833 | 0 | 1.364 | 0.232\n Berl saddle | ceramic | 25.0 | 80080 | 260.0 | 0.680 | 0 | 0 | 0.620 | 0 | 1.246 | 0.387\n DIN-PAK | plastic | 47.0 | 28168 | 131.2 | 0.923 | 2.929 | 1.991 | 1.173 | 0.514 | 1.690 | 0.354\n DIN-PAK | plastic | 70.0 | 9763 | 110.7 | 0.938 | 2.970 | 1.912 | 0.991 | 0.378 | 1.527 | 0.326\n Ralu pak | metal | YC-250 | 0 | 250.0 | 0.945 | 3.178 | 2.558 | 0 | 0.191 | 1.334 | 0.385\n Mellapak | metal  | 250Y | 0 | 250.0 | 0.970 | 3.157 | 2.464 | 0.554 | 0.292 | 0 | 0\n Gempack | metal | A2T-304 | 0 | 202.0 | 0.977 | 2.986 | 2.099 | 0.678 | 0.344 | 0 | 0\n Impulse packing | metal | 250.0 | 0 | 250.0 | 0.975 | 2.610 | 1.996 | 0.431 | 0.262 | 0.983 | 0.270\n Impulse packing | ceramic | 100.0 | 0 | 91.4 | 0.838 | 2.664 | 1.655 | 1.900 | 0.417 | 1.317 | 0.327\n Montz packing | metal | B1-200 | 0 | 200.0 | 0.979 | 3.116 | 2.339 | 0.547 | 0.355 | 0.971 | 0.390\n Montz packing | metal | B2-300 | 0 | 300.0 | 0.930 | 3.098 | 2.464 | 0.482 | 0.295 | 1.165 | 0.422\n Montz packing | plastic | C1-200 | 0 | 200.0 | 0.954 | 0 | 0 | 0 | 0.453 | 1.006 | 0.412\n Montz packing | plastic | C2-200 | 0 | 200.0 | 0.900 | 2.653 | 1.973 | 0 | 0.481 | 0.739 | 0\n Euroform | plastic | PN-110 | 0 | 110.0 | 0.936 | 3.075 | 1.975 | 0.511 | 0.250 | 0.973 | 0.167\n'''\n\n\n\npackings = []\n\nfor line in packings_str.strip().splitlines():\n line_items = line.split(\" | \")\n line_items = [s.strip() for s in line_items]\n name, material, size, N, a, eps, CS, CFl, Ch, CP0, CL, CV = line_items\n packings.append({\n 'name': name,\n 'material': material,\n 'size': size,\n 'N': int(N),\n 'a': float(a),\n 'eps': float(eps),\n 'CS': float(CS),\n 'CFl': float(CFl),\n 'Ch': float(Ch),\n 'CP0': float(CP0),\n 'CL': float(CL),\n 'CV': float(CV),\n })\n\n\n\n# EXPORTING PACKING 
NAME\nseen_packing_name = set()\nexport_packing_name = []\n\nfor i in range(len(packings)):\n if packings[i][\"name\"] not in seen_packing_name:\n seen_packing_name.add(packings[i][\"name\"]) \n export_packing_name.append(packings[i][\"name\"])\n else:\n pass\n\n\n# # EXPORT PACKING SURFACEAREA\n# export_packing_surfacearea = []\n\n# for item in packings:\n# if item[\"name\"] == type_packing:\n# export_packing_surfacearea.append(item[\"a\"])\n\n# print(export_packing_surfacearea)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
class ListNode(object): def __init__(self, val, next=None): self.val = val self.next = next class Solution: """ @param l1: the first list @param l2: the second list @return: the sum list of l1 and l2 """ def addLists(self, l1, l2): res = ListNode(0) p = res carry = 0 while l1 or l2 or carry: num = 0 if l1: num += l1.val l1 = l1.next if l2: num += l2.val l2 = l2.next num += carry digit, carry = num % 10, num // 10 node = ListNode(digit) p.next = node p = p.next return res.next
normal
{ "blob_id": "8909ee9c54a234222a41249e1f3005fd86e21cf0", "index": 1782, "step-1": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n", "step-2": "<mask token>\n\n\nclass Solution:\n \"\"\"\n @param l1: the first list\n @param l2: the second list\n @return: the sum list of l1 and l2\n \"\"\"\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n", "step-3": "class ListNode(object):\n <mask token>\n\n\nclass Solution:\n \"\"\"\n @param l1: the first list\n @param l2: the second list\n @return: the sum list of l1 and l2\n \"\"\"\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n", "step-4": "class ListNode(object):\n\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n \"\"\"\n @param l1: the first list\n @param l2: the second list\n @return: the sum list of l1 and l2\n \"\"\"\n\n def addLists(self, l1, l2):\n res = ListNode(0)\n p = res\n carry = 0\n while l1 or l2 or carry:\n num = 0\n if l1:\n num += l1.val\n l1 = l1.next\n if l2:\n num += l2.val\n l2 = l2.next\n num += carry\n digit, carry = num % 10, num // 10\n node = ListNode(digit)\n p.next = node\n p = p.next\n return res.next\n", 
"step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
import torch
import torch.optim as optim
import torch.nn as nn
import torch.utils.data as data

from dataset import InsuranceAnswerDataset, DataEmbedding
from model import Matcher
from tools import Trainer, Evaluator
from tools import save_checkpoint, load_checkpoint, get_memory_use


def main():
    """Train, validate and test the insurance-QA ``Matcher`` model.

    Runs a full training loop with progressive negative-sample expansion,
    late-stage checkpoint/rollback driven by validation accuracy, and a
    final evaluation on the test split.  All artifacts (loss/accuracy
    histories, checkpoints, final model) are written under ``save_dir``.
    """
    # --- hyper-parameters -------------------------------------------------
    batch_size = 64
    valid_batch_size = 8
    dataset_size = 500            # passed to InsuranceAnswerDataset for every split
    learning_rate = 0.001
    weight_decay = 1e-4
    epochs = 30
    show_frq = 20                 # progress-print frequency forwarded to Trainer.train
    negative_size = 10            # negatives per sample at the start of training
    negative_expand = 1           # negatives added per epoch while retaking
    negative_size_bound = 20      # upper cap on negatives per sample
    negative_retake = True        # rebuild the training set every epoch
    load_read_model = False       # flips to True for the last 5 epochs (rollback mode)
    save_dir = '/cos_person/data/'
    # Autotune cuDNN kernels; beneficial because batch shapes are fixed.
    torch.backends.cudnn.benchmark = True

    dm = DataEmbedding()

    # Training uses `negative_size` negatives; validation uses a fixed 400.
    dataset = InsuranceAnswerDataset(dataset_size=dataset_size,
                                     negative_size=negative_size, data_type='train')
    valid_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,
                                           negative_size=400, data_type='valid')

    print(len(dataset))

    model = Matcher(embedding_dim=dm.embedding_dim, vocab_size=dm.embedding_size,
                    hidden_dim=150, tagset_size=50, negative_size=negative_size)

    # Copy the pretrained embedding matrix into the encoder's embedding layer
    # (move both to GPU first if one is available).
    embedding_matrix = torch.Tensor(dm.get_embedding_matrix())
    print('before model:' + get_memory_use())
    if torch.cuda.is_available():
        embedding_matrix = embedding_matrix.cuda()
        model = model.cuda()
    model.encoder.embedding.weight.data.copy_(embedding_matrix)
    print('after model:' + get_memory_use())

    train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size,
                                   shuffle=True, drop_last=True)
    valid_loader = data.DataLoader(dataset=valid_dataset, batch_size=valid_batch_size,
                                   shuffle=True, drop_last=True)

    optimizer = optim.Adam(model.parameters(), lr=learning_rate,
                           weight_decay=weight_decay, amsgrad=True)

    # Per-epoch metric histories; Trainer/Evaluator append into these in place.
    train_accu_list = []
    train_loss_list = []
    valid_accu_list = []
    valid_loss_list = []

    trainer = Trainer(model=model, loader=train_loader, optimizer=optimizer,
                      batch_size=batch_size, data_size=len(train_loader),
                      threshold_decay=True)
    valider = Evaluator(model=model, loader=valid_loader, batch_size=valid_batch_size)
    for epoch in range(1, epochs + 1):
        print('before:' + get_memory_use())
        print('Epoch {} start...'.format(epoch))
        # The model must be told how many negatives each sample carries,
        # since training and validation use different negative counts.
        model.reset_negative(dataset.negative_size)
        trainer.train(epoch=epoch, show_frq=show_frq,
                      accu_list=train_accu_list, loss_list=train_loss_list)
        print('train after:' + get_memory_use())
        model.reset_negative(valid_dataset.negative_size)
        valider.evaluate(epoch=epoch, accu_list=valid_accu_list,
                         loss_list=valid_loss_list)
        print('valid after:' + get_memory_use())
        # Persist the training curves every epoch so progress survives a crash.
        torch.save(train_loss_list, save_dir + 'train_loss.pkl')
        torch.save(train_accu_list, save_dir + 'train_accu.pkl')
        if negative_retake:
            # Re-sample the training set with one more negative per sample
            # (up to the bound), releasing the old dataset/loader first.
            if negative_size + negative_expand <= negative_size_bound:
                negative_size += negative_expand
            del dataset
            del train_loader
            dataset = InsuranceAnswerDataset(dataset_size=dataset_size,
                                             negative_size=negative_size)
            train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size,
                                           shuffle=True, drop_last=True)
            trainer.loader = train_loader
        if epochs - epoch <= 5:
            # Final 5 epochs: switch to checkpoint/rollback mode below.
            load_read_model = True
        if load_read_model:
            if epoch <= 1:
                save_checkpoint(save_dir=save_dir + 'check.pkl', model=model,
                                optimizer=optimizer)
            elif valid_accu_list[-1] > valid_accu_list[-2] \
                    or (valid_accu_list[-1] == valid_accu_list[-2] and valid_loss_list[-1] < valid_loss_list[-2]):
                # Validation improved (higher accuracy, or equal accuracy with
                # lower loss): keep this state as the new checkpoint.
                save_checkpoint(save_dir=save_dir + 'check.pkl', model=model,
                                optimizer=optimizer)
            else:
                # Validation regressed: roll back model and optimizer to the
                # last checkpoint and decay the learning rate before continuing.
                # NOTE(review): `_lr_decay` is a private Trainer method — confirm
                # Trainer does not expose a public LR-decay API.
                checkpoint = load_checkpoint(save_dir + 'check.pkl')
                model.load_state_dict(checkpoint['model_state_dict'])
                optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                trainer.model = model
                trainer.optimizer = optimizer
                trainer._lr_decay(0.8)
                valider.model = model
        else:
            # Before rollback mode kicks in, just snapshot the whole model.
            torch.save(model, save_dir + 'model.pkl')

    # Final dump of all metric histories and the trained model.
    torch.save(train_loss_list, save_dir + 'train_loss.pkl')
    torch.save(train_accu_list, save_dir + 'train_accu.pkl')
    torch.save(valid_loss_list, save_dir + 'valid_loss.pkl')
    torch.save(valid_accu_list, save_dir + 'valid_accu.pkl')
    torch.save(model, save_dir + 'model.pkl')

    # --- test-set evaluation ---------------------------------------------
    test_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,
                                          negative_size=400, data_type='test')
    test_loader = data.DataLoader(dataset=test_dataset, batch_size=valid_batch_size,
                                  shuffle=True, drop_last=True)
    tester = Evaluator(model=model, loader=test_loader, batch_size=valid_batch_size)
    test_accu_list = []
    test_loss_list = []
    model.reset_negative(test_dataset.negative_size)
    tester.evaluate(epoch=1, accu_list=test_accu_list, loss_list=test_loss_list)
    torch.save(test_loss_list, save_dir + 'test_loss.pkl')
    torch.save(test_accu_list, save_dir + 'test_accu.pkl')


if __name__ == '__main__':
    main()
normal
{ "blob_id": "41f2a5ba0d7a726389936c1ff66a5724209ee99c", "index": 4099, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n batch_size = 64\n valid_batch_size = 8\n dataset_size = 500\n learning_rate = 0.001\n weight_decay = 0.0001\n epochs = 30\n show_frq = 20\n negative_size = 10\n negative_expand = 1\n negative_size_bound = 20\n negative_retake = True\n load_read_model = False\n save_dir = '/cos_person/data/'\n torch.backends.cudnn.benchmark = True\n dm = DataEmbedding()\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=negative_size, data_type='train')\n valid_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=400, data_type='valid')\n print(len(dataset))\n model = Matcher(embedding_dim=dm.embedding_dim, vocab_size=dm.\n embedding_size, hidden_dim=150, tagset_size=50, negative_size=\n negative_size)\n embedding_matrix = torch.Tensor(dm.get_embedding_matrix())\n print('before model:' + get_memory_use())\n if torch.cuda.is_available():\n embedding_matrix = embedding_matrix.cuda()\n model = model.cuda()\n model.encoder.embedding.weight.data.copy_(embedding_matrix)\n print('after model:' + get_memory_use())\n train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size,\n shuffle=True, drop_last=True)\n valid_loader = data.DataLoader(dataset=valid_dataset, batch_size=\n valid_batch_size, shuffle=True, drop_last=True)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=weight_decay, amsgrad=True)\n train_accu_list = []\n train_loss_list = []\n valid_accu_list = []\n valid_loss_list = []\n trainer = Trainer(model=model, loader=train_loader, optimizer=optimizer,\n batch_size=batch_size, data_size=len(train_loader), threshold_decay\n =True)\n valider = Evaluator(model=model, loader=valid_loader, batch_size=\n valid_batch_size)\n for epoch in range(1, epochs + 1):\n print('before:' + get_memory_use())\n print('Epoch {} start...'.format(epoch))\n 
model.reset_negative(dataset.negative_size)\n trainer.train(epoch=epoch, show_frq=show_frq, accu_list=\n train_accu_list, loss_list=train_loss_list)\n print('train after:' + get_memory_use())\n model.reset_negative(valid_dataset.negative_size)\n valider.evaluate(epoch=epoch, accu_list=valid_accu_list, loss_list=\n valid_loss_list)\n print('valid after:' + get_memory_use())\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n if negative_retake:\n if negative_size + negative_expand <= negative_size_bound:\n negative_size += negative_expand\n del dataset\n del train_loader\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=negative_size)\n train_loader = data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=True, drop_last=True)\n trainer.loader = train_loader\n if epochs - epoch <= 5:\n load_read_model = True\n if load_read_model:\n if epoch <= 1:\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=\n model, optimizer=optimizer)\n elif valid_accu_list[-1] > valid_accu_list[-2] or valid_accu_list[\n -1] == valid_accu_list[-2] and valid_loss_list[-1\n ] < valid_loss_list[-2]:\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=\n model, optimizer=optimizer)\n else:\n checkpoint = load_checkpoint(save_dir + 'check.pkl')\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n trainer.model = model\n trainer.optimizer = optimizer\n trainer._lr_decay(0.8)\n valider.model = model\n else:\n torch.save(model, save_dir + 'model.pkl')\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n torch.save(valid_loss_list, save_dir + 'valid_loss.pkl')\n torch.save(valid_accu_list, save_dir + 'valid_accu.pkl')\n torch.save(model, save_dir + 'model.pkl')\n test_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=400, 
data_type='test')\n test_loader = data.DataLoader(dataset=test_dataset, batch_size=\n valid_batch_size, shuffle=True, drop_last=True)\n tester = Evaluator(model=model, loader=test_loader, batch_size=\n valid_batch_size)\n test_accu_list = []\n test_loss_list = []\n model.reset_negative(test_dataset.negative_size)\n tester.evaluate(epoch=1, accu_list=test_accu_list, loss_list=test_loss_list\n )\n torch.save(test_loss_list, save_dir + 'test_loss.pkl')\n torch.save(test_accu_list, save_dir + 'test_accu.pkl')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef main():\n batch_size = 64\n valid_batch_size = 8\n dataset_size = 500\n learning_rate = 0.001\n weight_decay = 0.0001\n epochs = 30\n show_frq = 20\n negative_size = 10\n negative_expand = 1\n negative_size_bound = 20\n negative_retake = True\n load_read_model = False\n save_dir = '/cos_person/data/'\n torch.backends.cudnn.benchmark = True\n dm = DataEmbedding()\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=negative_size, data_type='train')\n valid_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=400, data_type='valid')\n print(len(dataset))\n model = Matcher(embedding_dim=dm.embedding_dim, vocab_size=dm.\n embedding_size, hidden_dim=150, tagset_size=50, negative_size=\n negative_size)\n embedding_matrix = torch.Tensor(dm.get_embedding_matrix())\n print('before model:' + get_memory_use())\n if torch.cuda.is_available():\n embedding_matrix = embedding_matrix.cuda()\n model = model.cuda()\n model.encoder.embedding.weight.data.copy_(embedding_matrix)\n print('after model:' + get_memory_use())\n train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size,\n shuffle=True, drop_last=True)\n valid_loader = data.DataLoader(dataset=valid_dataset, batch_size=\n valid_batch_size, shuffle=True, drop_last=True)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=weight_decay, amsgrad=True)\n train_accu_list = []\n train_loss_list 
= []\n valid_accu_list = []\n valid_loss_list = []\n trainer = Trainer(model=model, loader=train_loader, optimizer=optimizer,\n batch_size=batch_size, data_size=len(train_loader), threshold_decay\n =True)\n valider = Evaluator(model=model, loader=valid_loader, batch_size=\n valid_batch_size)\n for epoch in range(1, epochs + 1):\n print('before:' + get_memory_use())\n print('Epoch {} start...'.format(epoch))\n model.reset_negative(dataset.negative_size)\n trainer.train(epoch=epoch, show_frq=show_frq, accu_list=\n train_accu_list, loss_list=train_loss_list)\n print('train after:' + get_memory_use())\n model.reset_negative(valid_dataset.negative_size)\n valider.evaluate(epoch=epoch, accu_list=valid_accu_list, loss_list=\n valid_loss_list)\n print('valid after:' + get_memory_use())\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n if negative_retake:\n if negative_size + negative_expand <= negative_size_bound:\n negative_size += negative_expand\n del dataset\n del train_loader\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=negative_size)\n train_loader = data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=True, drop_last=True)\n trainer.loader = train_loader\n if epochs - epoch <= 5:\n load_read_model = True\n if load_read_model:\n if epoch <= 1:\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=\n model, optimizer=optimizer)\n elif valid_accu_list[-1] > valid_accu_list[-2] or valid_accu_list[\n -1] == valid_accu_list[-2] and valid_loss_list[-1\n ] < valid_loss_list[-2]:\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=\n model, optimizer=optimizer)\n else:\n checkpoint = load_checkpoint(save_dir + 'check.pkl')\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n trainer.model = model\n trainer.optimizer = optimizer\n trainer._lr_decay(0.8)\n valider.model = model\n 
else:\n torch.save(model, save_dir + 'model.pkl')\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n torch.save(valid_loss_list, save_dir + 'valid_loss.pkl')\n torch.save(valid_accu_list, save_dir + 'valid_accu.pkl')\n torch.save(model, save_dir + 'model.pkl')\n test_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=400, data_type='test')\n test_loader = data.DataLoader(dataset=test_dataset, batch_size=\n valid_batch_size, shuffle=True, drop_last=True)\n tester = Evaluator(model=model, loader=test_loader, batch_size=\n valid_batch_size)\n test_accu_list = []\n test_loss_list = []\n model.reset_negative(test_dataset.negative_size)\n tester.evaluate(epoch=1, accu_list=test_accu_list, loss_list=test_loss_list\n )\n torch.save(test_loss_list, save_dir + 'test_loss.pkl')\n torch.save(test_accu_list, save_dir + 'test_accu.pkl')\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.utils.data as data\nfrom dataset import InsuranceAnswerDataset, DataEmbedding\nfrom model import Matcher\nfrom tools import Trainer, Evaluator\nfrom tools import save_checkpoint, load_checkpoint, get_memory_use\n\n\ndef main():\n batch_size = 64\n valid_batch_size = 8\n dataset_size = 500\n learning_rate = 0.001\n weight_decay = 0.0001\n epochs = 30\n show_frq = 20\n negative_size = 10\n negative_expand = 1\n negative_size_bound = 20\n negative_retake = True\n load_read_model = False\n save_dir = '/cos_person/data/'\n torch.backends.cudnn.benchmark = True\n dm = DataEmbedding()\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=negative_size, data_type='train')\n valid_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=400, data_type='valid')\n print(len(dataset))\n model = Matcher(embedding_dim=dm.embedding_dim, vocab_size=dm.\n embedding_size, hidden_dim=150, 
tagset_size=50, negative_size=\n negative_size)\n embedding_matrix = torch.Tensor(dm.get_embedding_matrix())\n print('before model:' + get_memory_use())\n if torch.cuda.is_available():\n embedding_matrix = embedding_matrix.cuda()\n model = model.cuda()\n model.encoder.embedding.weight.data.copy_(embedding_matrix)\n print('after model:' + get_memory_use())\n train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size,\n shuffle=True, drop_last=True)\n valid_loader = data.DataLoader(dataset=valid_dataset, batch_size=\n valid_batch_size, shuffle=True, drop_last=True)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=weight_decay, amsgrad=True)\n train_accu_list = []\n train_loss_list = []\n valid_accu_list = []\n valid_loss_list = []\n trainer = Trainer(model=model, loader=train_loader, optimizer=optimizer,\n batch_size=batch_size, data_size=len(train_loader), threshold_decay\n =True)\n valider = Evaluator(model=model, loader=valid_loader, batch_size=\n valid_batch_size)\n for epoch in range(1, epochs + 1):\n print('before:' + get_memory_use())\n print('Epoch {} start...'.format(epoch))\n model.reset_negative(dataset.negative_size)\n trainer.train(epoch=epoch, show_frq=show_frq, accu_list=\n train_accu_list, loss_list=train_loss_list)\n print('train after:' + get_memory_use())\n model.reset_negative(valid_dataset.negative_size)\n valider.evaluate(epoch=epoch, accu_list=valid_accu_list, loss_list=\n valid_loss_list)\n print('valid after:' + get_memory_use())\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n if negative_retake:\n if negative_size + negative_expand <= negative_size_bound:\n negative_size += negative_expand\n del dataset\n del train_loader\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=negative_size)\n train_loader = data.DataLoader(dataset=dataset, batch_size=\n batch_size, shuffle=True, drop_last=True)\n 
trainer.loader = train_loader\n if epochs - epoch <= 5:\n load_read_model = True\n if load_read_model:\n if epoch <= 1:\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=\n model, optimizer=optimizer)\n elif valid_accu_list[-1] > valid_accu_list[-2] or valid_accu_list[\n -1] == valid_accu_list[-2] and valid_loss_list[-1\n ] < valid_loss_list[-2]:\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=\n model, optimizer=optimizer)\n else:\n checkpoint = load_checkpoint(save_dir + 'check.pkl')\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n trainer.model = model\n trainer.optimizer = optimizer\n trainer._lr_decay(0.8)\n valider.model = model\n else:\n torch.save(model, save_dir + 'model.pkl')\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n torch.save(valid_loss_list, save_dir + 'valid_loss.pkl')\n torch.save(valid_accu_list, save_dir + 'valid_accu.pkl')\n torch.save(model, save_dir + 'model.pkl')\n test_dataset = InsuranceAnswerDataset(dataset_size=dataset_size,\n negative_size=400, data_type='test')\n test_loader = data.DataLoader(dataset=test_dataset, batch_size=\n valid_batch_size, shuffle=True, drop_last=True)\n tester = Evaluator(model=model, loader=test_loader, batch_size=\n valid_batch_size)\n test_accu_list = []\n test_loss_list = []\n model.reset_negative(test_dataset.negative_size)\n tester.evaluate(epoch=1, accu_list=test_accu_list, loss_list=test_loss_list\n )\n torch.save(test_loss_list, save_dir + 'test_loss.pkl')\n torch.save(test_accu_list, save_dir + 'test_accu.pkl')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.utils.data as data\n\nfrom dataset import InsuranceAnswerDataset, DataEmbedding\nfrom model import Matcher\nfrom tools import Trainer, Evaluator\nfrom tools import save_checkpoint, load_checkpoint, 
get_memory_use\n\n\ndef main():\n batch_size = 64\n valid_batch_size = 8\n dataset_size = 500\n learning_rate = 0.001\n weight_decay = 1e-4\n epochs = 30\n show_frq = 20\n negative_size = 10\n negative_expand = 1\n negative_size_bound = 20\n negative_retake = True\n load_read_model = False\n save_dir = '/cos_person/data/'\n torch.backends.cudnn.benchmark = True\n\n dm = DataEmbedding()\n\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=negative_size, data_type='train')\n valid_dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=400,\n data_type='valid')\n\n print(len(dataset))\n\n model = Matcher(embedding_dim=dm.embedding_dim, vocab_size=dm.embedding_size,\n hidden_dim=150, tagset_size=50, negative_size=negative_size)\n\n embedding_matrix = torch.Tensor(dm.get_embedding_matrix())\n print('before model:' + get_memory_use())\n if torch.cuda.is_available():\n embedding_matrix = embedding_matrix.cuda()\n model = model.cuda()\n model.encoder.embedding.weight.data.copy_(embedding_matrix)\n print('after model:' + get_memory_use())\n\n train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n valid_loader = data.DataLoader(dataset=valid_dataset, batch_size=valid_batch_size, shuffle=True, drop_last=True)\n\n optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay, amsgrad=True)\n\n train_accu_list = []\n train_loss_list = []\n valid_accu_list = []\n valid_loss_list = []\n\n trainer = Trainer(model=model, loader=train_loader, optimizer=optimizer, batch_size=batch_size,\n data_size=len(train_loader), threshold_decay=True)\n valider = Evaluator(model=model, loader=valid_loader, batch_size=valid_batch_size)\n for epoch in range(1, epochs + 1):\n print('before:' + get_memory_use())\n print('Epoch {} start...'.format(epoch))\n model.reset_negative(dataset.negative_size)\n trainer.train(epoch=epoch, show_frq=show_frq, accu_list=train_accu_list, 
loss_list=train_loss_list)\n print('train after:' + get_memory_use())\n model.reset_negative(valid_dataset.negative_size)\n valider.evaluate(epoch=epoch, accu_list=valid_accu_list, loss_list=valid_loss_list)\n print('valid after:' + get_memory_use())\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n if negative_retake:\n if negative_size + negative_expand <= negative_size_bound:\n negative_size += negative_expand\n del dataset\n del train_loader\n dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=negative_size)\n train_loader = data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, drop_last=True)\n trainer.loader = train_loader\n if epochs - epoch <= 5:\n load_read_model = True\n if load_read_model:\n if epoch <= 1:\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=model, optimizer=optimizer)\n elif valid_accu_list[-1] > valid_accu_list[-2] \\\n or (valid_accu_list[-1] == valid_accu_list[-2] and valid_loss_list[-1] < valid_loss_list[-2]):\n save_checkpoint(save_dir=save_dir + 'check.pkl', model=model, optimizer=optimizer)\n else:\n checkpoint = load_checkpoint(save_dir + 'check.pkl')\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n trainer.model = model\n trainer.optimizer = optimizer\n trainer._lr_decay(0.8)\n valider.model = model\n else:\n torch.save(model, save_dir + 'model.pkl')\n\n torch.save(train_loss_list, save_dir + 'train_loss.pkl')\n torch.save(train_accu_list, save_dir + 'train_accu.pkl')\n torch.save(valid_loss_list, save_dir + 'valid_loss.pkl')\n torch.save(valid_accu_list, save_dir + 'valid_accu.pkl')\n torch.save(model, save_dir + 'model.pkl')\n\n test_dataset = InsuranceAnswerDataset(dataset_size=dataset_size, negative_size=400, data_type='test')\n test_loader = data.DataLoader(dataset=test_dataset, batch_size=valid_batch_size, shuffle=True, drop_last=True)\n 
tester = Evaluator(model=model, loader=test_loader, batch_size=valid_batch_size)\n test_accu_list = []\n test_loss_list = []\n model.reset_negative(test_dataset.negative_size)\n tester.evaluate(epoch=1, accu_list=test_accu_list, loss_list=test_loss_list)\n torch.save(test_loss_list, save_dir + 'test_loss.pkl')\n torch.save(test_accu_list, save_dir + 'test_accu.pkl')\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# e.g. 8-34
# Launch each demo script in its own child process via fork+exec, then show a
# small Tk window in the parent to indicate the demos were started.

from tkinter import *
from PP4E.launchmodes import PortableLauncher
import os, sys


demoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']

for demo in demoModules:
    pid = os.fork()                      # parent keeps looping; child execs below
    filepath = './' + demo + '.py'
    if pid == 0:
        # BUG FIX: the first element of the argv tuple is the program name
        # (argv[0]) by convention; the script path must be a *separate*
        # argument.  The original `os.execvp('python3.5', (filepath,))` gave
        # the new interpreter no script to run, so it started an interactive
        # shell instead of executing the demo.
        os.execvp('python3.5', ('python3.5', filepath))

root = Tk()
root.title('Progress')
Label(root, text='Multiple program demo: command lines', bg='white').pack()
root.mainloop()
normal
{ "blob_id": "d91dc850c293cf085e1be04b6e13e0a62cb0bcb1", "index": 9812, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\n<mask token>\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n", "step-3": "<mask token>\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n", "step-4": "from tkinter import *\nfrom PP4E.launchmodes import PortableLauncher\nimport os, sys\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath,))\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n", "step-5": "# e.g. 8-34\n\nfrom tkinter import *\nfrom PP4E.launchmodes import PortableLauncher\nimport os, sys\n\n\ndemoModules = ['demoDlg', 'demoRadio', 'demoCheck', 'demoScale']\n\nfor demo in demoModules:\n pid = os.fork()\n filepath = './' + demo + '.py'\n if pid == 0:\n os.execvp('python3.5', (filepath, ))\n\nroot = Tk()\nroot.title('Progress')\nLabel(root, text='Multiple program demo: command lines', bg='white').pack()\nroot.mainloop()\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Generated by Django 3.0.5 on 2020-04-25 15:35 from django.db import migrations, models import lots.models class Migration(migrations.Migration): dependencies = [ ('lots', '0012_auto_20200425_1720'), ] operations = [ migrations.AlterField( model_name='lots', name='photo', field=models.ImageField(default='images/default.png', upload_to=lots.models.path_and_rename), ), ]
normal
{ "blob_id": "b36f3ffed888edaa7716f712f1549dc205799caf", "index": 6338, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lots', '0012_auto_20200425_1720')]\n operations = [migrations.AlterField(model_name='lots', name='photo',\n field=models.ImageField(default='images/default.png', upload_to=\n lots.models.path_and_rename))]\n", "step-4": "from django.db import migrations, models\nimport lots.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lots', '0012_auto_20200425_1720')]\n operations = [migrations.AlterField(model_name='lots', name='photo',\n field=models.ImageField(default='images/default.png', upload_to=\n lots.models.path_and_rename))]\n", "step-5": "# Generated by Django 3.0.5 on 2020-04-25 15:35\n\nfrom django.db import migrations, models\nimport lots.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lots', '0012_auto_20200425_1720'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='lots',\n name='photo',\n field=models.ImageField(default='images/default.png', upload_to=lots.models.path_and_rename),\n ),\n ]\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# Dependencies
import pandas as pd

# Load in data file from resources
bank_data = "Resources/budget_data.csv"

# Read the budget data with pandas
bank_df = pd.read_csv(bank_data)

# Total number of months included in the dataset
total_months = bank_df["Date"].count()

# Total net amount of "Profit/Losses" over the entire period
net_end = bank_df["Profit/Losses"].sum()

# Month-over-month change in "Profit/Losses" (first row is NaN: no prior month)
bank_df["Change"] = bank_df["Profit/Losses"].diff()

# Average change between months; Series.mean() skips the leading NaN
average_change = bank_df["Change"].mean()

# Greatest increase in profits: report date AND amount.  (The original code
# printed an entire DataFrame row instead of the two requested values.)
increase_row = bank_df.loc[bank_df["Change"].idxmax()]
greatest_increase = increase_row["Change"]
greatest_increase_date = increase_row["Date"]

# Greatest decrease in profits (date and amount)
decrease_row = bank_df.loc[bank_df["Change"].idxmin()]
greatest_decrease = decrease_row["Change"]
greatest_decrease_date = decrease_row["Date"]

# Build the report once so the console output and output.txt always agree.
# (The original rounded only the console value and used two different
# divider strings between the two outputs.)
report_lines = [
    "Financial Analysis",
    "----------------------------",
    f"Total Months: {total_months}",
    f"Total: {net_end}",
    f"Average Change: ${round(average_change, 2)}",
    f"Greatest Increase in Profits: {greatest_increase_date} (${int(greatest_increase)})",
    f"Greatest Decrease in Profits: {greatest_decrease_date} (${int(greatest_decrease)})",
]
report = "\n".join(report_lines)

print(report)

# Context manager guarantees the file is closed even on error; the original
# opened output.txt and never closed it.
with open("output.txt", "w") as output:
    output.write(report + "\n")
normal
{ "blob_id": "1ad694c68ef264c6fbba4f4b9c069f22818d2816", "index": 9973, "step-1": "<mask token>\n", "step-2": "<mask token>\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n", "step-3": "<mask token>\nbank_data = 'Resources/budget_data.csv'\nbank_df = pd.read_csv(bank_data)\ntotal_months = bank_df['Date'].count()\nnet_end = bank_df['Profit/Losses'].sum()\nbank_df['Change'] = bank_df['Profit/Losses'].diff()\naverage_change = bank_df['Change'].mean()\ngreatest_increase = bank_df['Change'].max()\ngreatest_increase_month = bank_df.loc[bank_df['Change'] == greatest_increase, :\n ]\ngreatest_decrease = bank_df['Change'].min()\ngreatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, :\n ]\nfinancial_analysis = print('Financial Analysis'), print(\n '----------------------------'), print(f'Total Months: {total_months}'\n ), print(f'Total: {net_end}'), print(\n f'Average Change: ${round(average_change)}'), print(\n f'Greatest Increase in Profits:'), print(str(greatest_increase_month)\n ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month)\noutput = open('output.txt', 'w')\nline1 = 'Financial Analysis'\nline2 = '---------------------'\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n", "step-4": "import pandas as pd\nbank_data = 'Resources/budget_data.csv'\nbank_df = pd.read_csv(bank_data)\ntotal_months = bank_df['Date'].count()\nnet_end = bank_df['Profit/Losses'].sum()\nbank_df['Change'] = bank_df['Profit/Losses'].diff()\naverage_change = bank_df['Change'].mean()\ngreatest_increase = bank_df['Change'].max()\ngreatest_increase_month 
= bank_df.loc[bank_df['Change'] == greatest_increase, :\n ]\ngreatest_decrease = bank_df['Change'].min()\ngreatest_decrease_month = bank_df.loc[bank_df['Change'] == greatest_decrease, :\n ]\nfinancial_analysis = print('Financial Analysis'), print(\n '----------------------------'), print(f'Total Months: {total_months}'\n ), print(f'Total: {net_end}'), print(\n f'Average Change: ${round(average_change)}'), print(\n f'Greatest Increase in Profits:'), print(str(greatest_increase_month)\n ), print(f'Greatest Decrease in Profits:'), print(greatest_decrease_month)\noutput = open('output.txt', 'w')\nline1 = 'Financial Analysis'\nline2 = '---------------------'\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write(\"\"\"{}\n{}\n{}\n{}\n{}\n{}\n{}\n\"\"\".format(line1, line2, line3, line4,\n line5, line6, line7))\n", "step-5": "# Dependencies\nimport pandas as pd\n\n# Load in data file from resources\nbank_data = \"Resources/budget_data.csv\"\n\n# Read and display with pandas\nbank_df = pd.read_csv(bank_data)\n\n# Find the total number of months included in the dataset\ntotal_months = bank_df[\"Date\"].count()\n\n# Find the total net amount of \"Profit/Losses\" over the entire period\nnet_end = bank_df[\"Profit/Losses\"].sum()\n\n# Create a new column that displays profit or loss between months\nbank_df[\"Change\"] = bank_df[\"Profit/Losses\"].diff()\n\n# Find the average change in \"Profit/Losses\" between months over the entire period\naverage_change = bank_df[\"Change\"].mean()\n\n# Find the greatest increase in profits (date and amount) over the entire period\ngreatest_increase = bank_df[\"Change\"].max()\ngreatest_increase_month = bank_df.loc[bank_df[\"Change\"] == greatest_increase, :]\n\n# Find the greatest decrease in losses (date 
and amount) over the entire period\ngreatest_decrease = bank_df[\"Change\"].min()\ngreatest_decrease_month = bank_df.loc[bank_df[\"Change\"] == greatest_decrease, :]\n\n# Print financial analysis\nfinancial_analysis = (print(\"Financial Analysis\"), print(\"----------------------------\"), \nprint(f'Total Months: {total_months}'), print(f'Total: {net_end}'), \nprint(f'Average Change: ${round(average_change)}'), \nprint(f'Greatest Increase in Profits:'), \nprint(str(greatest_increase_month)),\nprint(f'Greatest Decrease in Profits:'), \nprint(greatest_decrease_month))\n\n# Export to .txt\noutput = open(\"output.txt\", \"w\")\n\nline1 = \"Financial Analysis\"\nline2 = \"---------------------\"\nline3 = str(f'Total Months: {total_months}')\nline4 = str(f'Total: {net_end}')\nline5 = str(f'Average Change: ${average_change}')\nline6 = str(f'Greatest Increase in Profits: {greatest_increase_month}')\nline7 = str(f'Greatest Decrease in Profits: {greatest_decrease_month}')\noutput.write('{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(line1,line2,line3,line4,line5,line6,line7))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*- import itertools as itt import random import unittest from collections import Counter from uuid import uuid4 import numpy as np from pybel import BELGraph from pybel.constants import INCREASES, PROTEIN from pybel.dsl import protein from pybel_tools.selection import get_random_subgraph from pybel_tools.selection.random_subgraph import randomly_select_node def n(): """Generates a PyBEL node tuple :rtype: tuple """ return PROTEIN, 'TEST', str(uuid4()) class TestRandomSelectNode(unittest.TestCase): """Test random node selection""" def setUp(self): self.random_state = np.random.RandomState(seed=127) self.trials = 30000 def test_randomly_select_node_1(self): """Tests that randomly selecting nodes works""" a, b, c, d = (n() for _ in range(4)) g = BELGraph() g.add_edge(a, b) g.add_edge(b, c) g.add_edge(b, d) self.assertEqual(1, g.degree(a)) self.assertEqual(3, g.degree(b)) self.assertEqual(1, g.degree(c)) self.assertEqual(1, g.degree(d)) no_grow = set() node_counter = Counter( randomly_select_node(g, no_grow, self.random_state) for _ in range(self.trials) ) self.assertIn(a, node_counter) self.assertAlmostEqual((1 / 6), node_counter[a] / self.trials, places=2) self.assertIn(b, node_counter) self.assertAlmostEqual((3 / 6), node_counter[b] / self.trials, places=2) self.assertIn(c, node_counter) self.assertAlmostEqual((1 / 6), node_counter[c] / self.trials, places=2) self.assertIn(d, node_counter) self.assertAlmostEqual((1 / 6), node_counter[d] / self.trials, places=2) def test_randomly_select_node_2(self): """Tests that randomly selecting nodes works, but disallow C""" a, b, c, d = (n() for _ in range(4)) g = BELGraph() g.add_edge(a, b) g.add_edge(b, c) g.add_edge(b, d) self.assertEqual(1, g.degree(a)) self.assertEqual(3, g.degree(b)) self.assertEqual(1, g.degree(c)) self.assertEqual(1, g.degree(d)) no_grow = {c} node_counter = Counter( randomly_select_node(g, no_grow, self.random_state) for _ in range(self.trials) ) self.assertIn(a, node_counter) 
self.assertAlmostEqual((1 / 5), node_counter[a] / self.trials, places=2) self.assertIn(b, node_counter) self.assertAlmostEqual((3 / 5), node_counter[b] / self.trials, places=2) self.assertNotIn(c, node_counter) self.assertIn(d, node_counter) self.assertAlmostEqual((1 / 5), node_counter[d] / self.trials, places=2) def make_nodes(n): """Returns a list of PyBEL node data dictionaries :param int n: number nodes :rtype: list[protein] """ return [ protein(namespace='NS', name=str(i)) for i in range(1, n) ] class TestRandomSample(unittest.TestCase): def setUp(self): np.random.seed(127) def test_okay(self): graph = BELGraph() nodes = make_nodes(50) edges = list(itt.combinations(nodes, r=2)) random.shuffle(edges) n_edges = 500 for u, v in edges[:n_edges]: graph.add_qualified_edge( u, v, relation=INCREASES, citation=str(uuid4()), evidence=str(uuid4()), ) self.assertEqual(n_edges, graph.number_of_edges()) sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127) self.assertEqual(250, sg.number_of_edges()) def test_too_small(self): graph = BELGraph() nodes = make_nodes(11) edges = list(itt.combinations(nodes, r=2)) random.shuffle(edges) n_edges = 25 for u, v in edges[:n_edges]: graph.add_qualified_edge( u, v, relation=INCREASES, citation=str(uuid4()), evidence=str(uuid4()), ) self.assertEqual(n_edges, graph.number_of_edges()) sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127) self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg='since graph is too small, the subgraph should contain the whole thing')
normal
{ "blob_id": "3a88ff479e3b01518d79e9930c29514863f96f9b", "index": 1568, "step-1": "<mask token>\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n", "step-2": "<mask token>\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = set()\n node_counter = Counter(randomly_select_node(g, no_grow, 
self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)\n self.assertIn(c, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = {c}\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)\n self.assertNotIn(c, node_counter)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)\n\n\n<mask token>\n\n\nclass TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = 
list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n", "step-3": "<mask token>\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n \"\"\"Test random node selection\"\"\"\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = set()\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)\n self.assertIn(c, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = 
{c}\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)\n self.assertNotIn(c, node_counter)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)\n\n\n<mask token>\n\n\nclass TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n", "step-4": "<mask token>\n\n\ndef n():\n \"\"\"Generates a PyBEL node tuple\n\n :rtype: tuple\n \"\"\"\n return PROTEIN, 'TEST', str(uuid4())\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n \"\"\"Test random node selection\"\"\"\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def 
test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = set()\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 6, node_counter[b] / self.trials, places=2)\n self.assertIn(c, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[c] / self.trials, places=2)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 6, node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n no_grow = {c}\n node_counter = Counter(randomly_select_node(g, no_grow, self.\n random_state) for _ in range(self.trials))\n self.assertIn(a, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[a] / self.trials, places=2)\n self.assertIn(b, node_counter)\n self.assertAlmostEqual(3 / 5, node_counter[b] / self.trials, places=2)\n self.assertNotIn(c, node_counter)\n self.assertIn(d, node_counter)\n self.assertAlmostEqual(1 / 5, node_counter[d] / self.trials, places=2)\n\n\n<mask token>\n\n\nclass TestRandomSample(unittest.TestCase):\n\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n 
n_edges = 500\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n n_edges = 25\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(u, v, relation=INCREASES, citation=str\n (uuid4()), evidence=str(uuid4()))\n self.assertEqual(n_edges, graph.number_of_edges())\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges\n =5, seed=127)\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(), msg\n =\n 'since graph is too small, the subgraph should contain the whole thing'\n )\n", "step-5": "# -*- coding: utf-8 -*-\n\nimport itertools as itt\nimport random\nimport unittest\nfrom collections import Counter\nfrom uuid import uuid4\n\nimport numpy as np\n\nfrom pybel import BELGraph\nfrom pybel.constants import INCREASES, PROTEIN\nfrom pybel.dsl import protein\nfrom pybel_tools.selection import get_random_subgraph\nfrom pybel_tools.selection.random_subgraph import randomly_select_node\n\n\ndef n():\n \"\"\"Generates a PyBEL node tuple\n\n :rtype: tuple\n \"\"\"\n return PROTEIN, 'TEST', str(uuid4())\n\n\nclass TestRandomSelectNode(unittest.TestCase):\n \"\"\"Test random node selection\"\"\"\n\n def setUp(self):\n self.random_state = np.random.RandomState(seed=127)\n self.trials = 30000\n\n def test_randomly_select_node_1(self):\n \"\"\"Tests that randomly selecting nodes works\"\"\"\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n 
no_grow = set()\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 6), node_counter[b] / self.trials, places=2)\n\n self.assertIn(c, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[c] / self.trials, places=2)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 6), node_counter[d] / self.trials, places=2)\n\n def test_randomly_select_node_2(self):\n \"\"\"Tests that randomly selecting nodes works, but disallow C\"\"\"\n a, b, c, d = (n() for _ in range(4))\n\n g = BELGraph()\n g.add_edge(a, b)\n g.add_edge(b, c)\n g.add_edge(b, d)\n\n self.assertEqual(1, g.degree(a))\n self.assertEqual(3, g.degree(b))\n self.assertEqual(1, g.degree(c))\n self.assertEqual(1, g.degree(d))\n\n no_grow = {c}\n\n node_counter = Counter(\n randomly_select_node(g, no_grow, self.random_state)\n for _ in range(self.trials)\n )\n\n self.assertIn(a, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[a] / self.trials, places=2)\n\n self.assertIn(b, node_counter)\n self.assertAlmostEqual((3 / 5), node_counter[b] / self.trials, places=2)\n\n self.assertNotIn(c, node_counter)\n\n self.assertIn(d, node_counter)\n self.assertAlmostEqual((1 / 5), node_counter[d] / self.trials, places=2)\n\n\ndef make_nodes(n):\n \"\"\"Returns a list of PyBEL node data dictionaries\n\n :param int n: number nodes\n :rtype: list[protein]\n \"\"\"\n return [\n protein(namespace='NS', name=str(i))\n for i in range(1, n)\n ]\n\n\nclass TestRandomSample(unittest.TestCase):\n def setUp(self):\n np.random.seed(127)\n\n def test_okay(self):\n graph = BELGraph()\n nodes = make_nodes(50)\n\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n\n n_edges = 500\n\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(\n u, v,\n 
relation=INCREASES,\n citation=str(uuid4()),\n evidence=str(uuid4()),\n )\n\n self.assertEqual(n_edges, graph.number_of_edges())\n\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)\n self.assertEqual(250, sg.number_of_edges())\n\n def test_too_small(self):\n graph = BELGraph()\n nodes = make_nodes(11)\n\n edges = list(itt.combinations(nodes, r=2))\n random.shuffle(edges)\n\n n_edges = 25\n\n for u, v in edges[:n_edges]:\n graph.add_qualified_edge(\n u, v,\n relation=INCREASES,\n citation=str(uuid4()),\n evidence=str(uuid4()),\n )\n\n self.assertEqual(n_edges, graph.number_of_edges())\n\n sg = get_random_subgraph(graph, number_edges=250, number_seed_edges=5, seed=127)\n\n self.assertEqual(graph.number_of_edges(), sg.number_of_edges(),\n msg='since graph is too small, the subgraph should contain the whole thing')\n", "step-ids": [ 5, 8, 9, 10, 13 ] }
[ 5, 8, 9, 10, 13 ]
import datetime import time def calculate(a): return a data = set() class Bank: amount = 0 def __init__(self): self.Bank_name = "State Bank of India" self.ifsc = 'SBI0N00012' def __repr__(self): return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} ' # self.stored = datetime.date.today() class CustomerDetails(Bank): check_amt = 18 def __init__(self,name,identity,acc,op_amount): Bank.__init__(self) self.name = name self.identity = identity self.acc = acc self.op_amount = op_amount Bank.amount += self.op_amount self.count = 0 def __repr__(self): return f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} ' # stored = datetime.datetime.today() # def __repr__(self) def deposite(self,credit): self.credit = credit self.op_amount += self.credit Bank.amount += self.op_amount print(f'You\'ve added {self.credit} : Total Amount = {self.op_amount}') return (Bank.amount) def check_balance(self): self.count += 1 if self.count > 3: self.op_amount -= CustomerDetails.check_amt return f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} ' else: return f'{self.name} your Balance : {self.op_amount}' # cus1 = CustomerDetails('Lucky','755376288106','67001010115773',5000) # print(cus1) cus2 = CustomerDetails('Pawan','755376288078','37376989161',10000) print(cus2) cus2.deposite(20000) print(cus2.check_balance()) print(cus2.check_balance()) print(cus2.check_balance()) print(cus2.check_balance()) print(cus2) # print(cus2.check_balance())
normal
{ "blob_id": "66ae7f4ee01ca5516d8e3dc447eeb4709e2b6aec", "index": 4615, "step-1": "<mask token>\n\n\nclass Bank:\n <mask token>\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. 
your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. 
your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef calculate(a):\n return a\n\n\ndata = set()\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. 
your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\ncus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n", "step-4": "import datetime\nimport time\n\n\ndef calculate(a):\n return a\n\n\ndata = set()\n\n\nclass Bank:\n amount = 0\n\n def __init__(self):\n self.Bank_name = 'State Bank of India'\n self.ifsc = 'SBI0N00012'\n\n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n\nclass CustomerDetails(Bank):\n check_amt = 18\n\n def __init__(self, name, identity, acc, op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return (\n f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n )\n\n def deposite(self, credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f\"You've added {self.credit} : Total Amount = {self.op_amount}\")\n return Bank.amount\n\n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return (\n f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. 
your Balance : {self.op_amount} '\n )\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n\n\ncus2 = CustomerDetails('Pawan', '755376288078', '37376989161', 10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n", "step-5": "import datetime\nimport time\n\ndef calculate(a):\n return a\n\n\ndata = set()\nclass Bank:\n amount = 0\n def __init__(self):\n self.Bank_name = \"State Bank of India\"\n self.ifsc = 'SBI0N00012'\n \n def __repr__(self):\n return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '\n\n # self.stored = datetime.date.today()\n\nclass CustomerDetails(Bank):\n check_amt = 18\n def __init__(self,name,identity,acc,op_amount):\n Bank.__init__(self)\n self.name = name\n self.identity = identity\n self.acc = acc\n self.op_amount = op_amount\n Bank.amount += self.op_amount\n self.count = 0\n\n def __repr__(self):\n return f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '\n\n # stored = datetime.datetime.today()\n # def __repr__(self)\n def deposite(self,credit):\n self.credit = credit\n self.op_amount += self.credit\n Bank.amount += self.op_amount\n print(f'You\\'ve added {self.credit} : Total Amount = {self.op_amount}')\n return (Bank.amount)\n \n def check_balance(self):\n self.count += 1\n if self.count > 3:\n self.op_amount -= CustomerDetails.check_amt\n return f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. 
your Balance : {self.op_amount} '\n else:\n return f'{self.name} your Balance : {self.op_amount}'\n \n \n\n# cus1 = CustomerDetails('Lucky','755376288106','67001010115773',5000)\n# print(cus1)\ncus2 = CustomerDetails('Pawan','755376288078','37376989161',10000)\nprint(cus2)\ncus2.deposite(20000)\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2.check_balance())\nprint(cus2)\n# print(cus2.check_balance())\n\n\n ", "step-ids": [ 9, 10, 13, 14, 15 ] }
[ 9, 10, 13, 14, 15 ]
import sys if sys.version_info.major == 2: from itertools import izip else: izip = zip
normal
{ "blob_id": "88445d8466d7acbf29d2525c7e322611d66494cd", "index": 8315, "step-1": "<mask token>\n", "step-2": "<mask token>\nif sys.version_info.major == 2:\n from itertools import izip\nelse:\n izip = zip\n", "step-3": "import sys\nif sys.version_info.major == 2:\n from itertools import izip\nelse:\n izip = zip\n", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from django.urls import path from . import views # . current directory urlpatterns = [ path("", views.index, name="index"), path("login", views.login_view, name="login"), path("logout", views.logout_view, name="logout"), path("menu", views.menu, name="menu"), path("add_item", views.add_item, name="add_item"), path("confirm_order", views.confirm_order, name="confirm_order") ]
normal
{ "blob_id": "9be6940fc6f405db652d478f9a74fcf56d8a0ad7", "index": 3470, "step-1": "<mask token>\n", "step-2": "<mask token>\nurlpatterns = [path('', views.index, name='index'), path('login', views.\n login_view, name='login'), path('logout', views.logout_view, name=\n 'logout'), path('menu', views.menu, name='menu'), path('add_item',\n views.add_item, name='add_item'), path('confirm_order', views.\n confirm_order, name='confirm_order')]\n", "step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.index, name='index'), path('login', views.\n login_view, name='login'), path('logout', views.logout_view, name=\n 'logout'), path('menu', views.menu, name='menu'), path('add_item',\n views.add_item, name='add_item'), path('confirm_order', views.\n confirm_order, name='confirm_order')]\n", "step-4": "from django.urls import path\nfrom . import views # . current directory\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"menu\", views.menu, name=\"menu\"),\n path(\"add_item\", views.add_item, name=\"add_item\"),\n path(\"confirm_order\", views.confirm_order, name=\"confirm_order\")\n]", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# -*- coding: utf-8 -*- """ Created on Thu May 24 18:18:36 2018 @author: Nicole """ from __future__ import division import Rod import matplotlib.pyplot as plt import math class Truss: def __init__(self,node1,node2,size,result,ax): self.node1=node1 self.node2=node2 self.rod=Rod.Rod(node1,node2,result) self.size=size self.result=result self.ax=ax self.length=math.sqrt((node1.x-node2.x)**2+(node1.y-node2.y)**2) def PlotCalculatedTruss(self): self.node1.PlotNode() self.node1.PlotSupport() self.node1.PlotForce() self.node2.PlotNode() self.node2.PlotSupport() self.node2.PlotForce() self.rod.PlotRod() self.rod.PlotResult() def PlotUncalculatedTruss(self): self.node1.PlotNode() self.node1.PlotSupport() self.node1.PlotForce() self.node2.PlotNode() self.node2.PlotSupport() self.node2.PlotForce() self.rod.PlotRod() def SaveTrussFig(self): plt.savefig('truss.png',dpi=600) plt.show() ''' pud=UnitPostProcess(1.8,1.4,3.4,3.2,1,1,1,0,5,0,0,8,8.0,48.6667) pud.setfig() pud.plot() pud=UnitPostProcess(3.4,3.2,7.4,3.2,0,0,1,1,0,0,0,0,8.0,23.3333) pud.plot() pud.savefig() '''
normal
{ "blob_id": "f01a1b6d0de4ba685c489af2742159447f943d2d", "index": 5605, "step-1": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n <mask token>\n <mask token>\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n <mask token>\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n\n def SaveTrussFig(self):\n 
plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n", "step-4": "<mask token>\nfrom __future__ import division\nimport Rod\nimport matplotlib.pyplot as plt\nimport math\n\n\nclass Truss:\n\n def __init__(self, node1, node2, size, result, ax):\n self.node1 = node1\n self.node2 = node2\n self.rod = Rod.Rod(node1, node2, result)\n self.size = size\n self.result = result\n self.ax = ax\n self.length = math.sqrt((node1.x - node2.x) ** 2 + (node1.y - node2\n .y) ** 2)\n\n def PlotCalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n\n def SaveTrussFig(self):\n plt.savefig('truss.png', dpi=600)\n plt.show()\n\n\n<mask token>\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 24 18:18:36 2018\n\n@author: Nicole\n\"\"\"\n\n\nfrom __future__ import division\nimport Rod\nimport matplotlib.pyplot as plt\nimport math\n\nclass Truss:\n def __init__(self,node1,node2,size,result,ax):\n self.node1=node1\n self.node2=node2\n self.rod=Rod.Rod(node1,node2,result)\n self.size=size\n self.result=result\n self.ax=ax\n self.length=math.sqrt((node1.x-node2.x)**2+(node1.y-node2.y)**2)\n def PlotCalculatedTruss(self): \n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n self.rod.PlotResult()\n def PlotUncalculatedTruss(self):\n self.node1.PlotNode()\n self.node1.PlotSupport()\n self.node1.PlotForce()\n self.node2.PlotNode()\n self.node2.PlotSupport()\n self.node2.PlotForce()\n self.rod.PlotRod()\n def SaveTrussFig(self):\n plt.savefig('truss.png',dpi=600)\n 
plt.show()\n\n'''\npud=UnitPostProcess(1.8,1.4,3.4,3.2,1,1,1,0,5,0,0,8,8.0,48.6667)\npud.setfig()\npud.plot()\npud=UnitPostProcess(3.4,3.2,7.4,3.2,0,0,1,1,0,0,0,0,8.0,23.3333)\npud.plot()\npud.savefig()\n'''", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# Demo - train the decoders & use them to stylize image from __future__ import print_function from train import train from infer import stylize from utils import list_images IS_TRAINING = True # for training TRAINING_IMGS_PATH = 'MS_COCO' ENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz' MODEL_SAVE_PATH = 'models/autoencoder' MODEL_SAVE_SUFFIX = '-done' DEBUG = True LOGGING_PERIOD = 10 AUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1] # for inferring (stylize) CONTENTS_DIR = 'images/content' STYLES_DIR = 'images/style' OUTPUT_DIR = 'outputs' STYLE_RATIO = 0.8 REPEAT_PIPELINE = 1 AUTUENCODER_LEVELS_INFER = [3, 2, 1] def main(): if IS_TRAINING: training_imgs_paths = list_images(TRAINING_IMGS_PATH) train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH, autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG, logging_period=LOGGING_PERIOD) print('\n>>>>>> Successfully done training...\n') else: contents_path = list_images(CONTENTS_DIR) styles_path = list_images(STYLES_DIR) model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX stylize(contents_path, styles_path, OUTPUT_DIR, ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO, repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=AUTUENCODER_LEVELS_INFER) print('\n>>>>>> Successfully done stylizing...\n') if __name__ == '__main__': main()
normal
{ "blob_id": "31ed798118f20005b5a26bc1fc0053b7d0a95657", "index": 5366, "step-1": "<mask token>\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nIS_TRAINING = True\nTRAINING_IMGS_PATH = 'MS_COCO'\nENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'\nMODEL_SAVE_PATH = 'models/autoencoder'\nMODEL_SAVE_SUFFIX = '-done'\nDEBUG = True\nLOGGING_PERIOD = 10\nAUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]\nCONTENTS_DIR = 'images/content'\nSTYLES_DIR = 'images/style'\nOUTPUT_DIR = 'outputs'\nSTYLE_RATIO = 0.8\nREPEAT_PIPELINE = 
1\nAUTUENCODER_LEVELS_INFER = [3, 2, 1]\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "from __future__ import print_function\nfrom train import train\nfrom infer import stylize\nfrom utils import list_images\nIS_TRAINING = True\nTRAINING_IMGS_PATH = 'MS_COCO'\nENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'\nMODEL_SAVE_PATH = 'models/autoencoder'\nMODEL_SAVE_SUFFIX = '-done'\nDEBUG = True\nLOGGING_PERIOD = 10\nAUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]\nCONTENTS_DIR = 'images/content'\nSTYLES_DIR = 'images/style'\nOUTPUT_DIR = 'outputs'\nSTYLE_RATIO = 0.8\nREPEAT_PIPELINE = 1\nAUTUENCODER_LEVELS_INFER = [3, 2, 1]\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> 
Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "# Demo - train the decoders & use them to stylize image\n\nfrom __future__ import print_function\n\nfrom train import train\nfrom infer import stylize\nfrom utils import list_images\n\n\nIS_TRAINING = True\n\n# for training\nTRAINING_IMGS_PATH = 'MS_COCO'\nENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'\nMODEL_SAVE_PATH = 'models/autoencoder'\nMODEL_SAVE_SUFFIX = '-done'\n\nDEBUG = True\nLOGGING_PERIOD = 10\nAUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]\n\n# for inferring (stylize)\nCONTENTS_DIR = 'images/content'\nSTYLES_DIR = 'images/style'\nOUTPUT_DIR = 'outputs'\n\nSTYLE_RATIO = 0.8\nREPEAT_PIPELINE = 1\nAUTUENCODER_LEVELS_INFER = [3, 2, 1]\n\n\ndef main():\n\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n\n train(training_imgs_paths,\n ENCODER_WEIGHTS_PATH,\n MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN,\n debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n \n print('\\n>>>>>> Successfully done training...\\n')\n\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n\n stylize(contents_path, \n styles_path, \n OUTPUT_DIR, \n ENCODER_WEIGHTS_PATH, \n model_path, \n style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE,\n autoencoder_levels=AUTUENCODER_LEVELS_INFER)\n\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# coding: utf-8 """ StockX API PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501 OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import io_stockx from io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia # noqa: E501 from io_stockx.rest import ApiException class TestPortfolioIdDelResponsePortfolioItemProductMedia(unittest.TestCase): """PortfolioIdDelResponsePortfolioItemProductMedia unit test stubs""" def setUp(self): pass def tearDown(self): pass def testPortfolioIdDelResponsePortfolioItemProductMedia(self): """Test PortfolioIdDelResponsePortfolioItemProductMedia""" # FIXME: construct object with mandatory attributes with example values # model = io_stockx.models.portfolio_id_del_response_portfolio_item_product_media.PortfolioIdDelResponsePortfolioItemProductMedia() # noqa: E501 pass if __name__ == '__main__': unittest.main()
normal
{ "blob_id": "ae88418ccfdaa4b357a2491f6450dbcda55b1c21", "index": 2013, "step-1": "<mask token>\n\n\nclass TestPortfolioIdDelResponsePortfolioItemProductMedia(unittest.TestCase):\n <mask token>\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testPortfolioIdDelResponsePortfolioItemProductMedia(self):\n \"\"\"Test PortfolioIdDelResponsePortfolioItemProductMedia\"\"\"\n pass\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TestPortfolioIdDelResponsePortfolioItemProductMedia(unittest.TestCase):\n \"\"\"PortfolioIdDelResponsePortfolioItemProductMedia unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testPortfolioIdDelResponsePortfolioItemProductMedia(self):\n \"\"\"Test PortfolioIdDelResponsePortfolioItemProductMedia\"\"\"\n pass\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TestPortfolioIdDelResponsePortfolioItemProductMedia(unittest.TestCase):\n \"\"\"PortfolioIdDelResponsePortfolioItemProductMedia unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testPortfolioIdDelResponsePortfolioItemProductMedia(self):\n \"\"\"Test PortfolioIdDelResponsePortfolioItemProductMedia\"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "<mask token>\nfrom __future__ import absolute_import\nimport unittest\nimport io_stockx\nfrom io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia\nfrom io_stockx.rest import ApiException\n\n\nclass TestPortfolioIdDelResponsePortfolioItemProductMedia(unittest.TestCase):\n \"\"\"PortfolioIdDelResponsePortfolioItemProductMedia unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testPortfolioIdDelResponsePortfolioItemProductMedia(self):\n \"\"\"Test PortfolioIdDelResponsePortfolioItemProductMedia\"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "# coding: utf-8\n\n\"\"\"\n StockX 
API\n\n PRERELEASE API - Subject to change before release. Provides access to StockX's public services, allowing end users to query for product and order information. # noqa: E501\n\n OpenAPI spec version: 1.0.0\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport io_stockx\nfrom io_stockx.models.portfolio_id_del_response_portfolio_item_product_media import PortfolioIdDelResponsePortfolioItemProductMedia # noqa: E501\nfrom io_stockx.rest import ApiException\n\n\nclass TestPortfolioIdDelResponsePortfolioItemProductMedia(unittest.TestCase):\n \"\"\"PortfolioIdDelResponsePortfolioItemProductMedia unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testPortfolioIdDelResponsePortfolioItemProductMedia(self):\n \"\"\"Test PortfolioIdDelResponsePortfolioItemProductMedia\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = io_stockx.models.portfolio_id_del_response_portfolio_item_product_media.PortfolioIdDelResponsePortfolioItemProductMedia() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
def solution(name): Len = len(name) nameList = [name[i] for i in range(Len)] nameField = ['A' for i in range(Len)] answer = 0 # 정방향 for i in range(Len): a = ord(nameField[i]) b = ord(nameList[i]) if b-a <= 13 : # 절반 이하면 그냥 더하고 answer += b-a else : # 절반 넘으면 26에서 빼기 answer += 26 - (b-a) nameField[i] = nameList[i] # name "A"가 들어간게 있을수도 있으니 if nameField == nameList : # 값 바꿔주고 전체 체크!! break answer +=1 # 이동가중치 ++ dap = answer # 정방향 + 역방향 t = (int)(Len/2) for i in range(t): # 0~전체길이/2 nameField = ['A' for i in range(Len)] answer = i for j in range(i+1): #정방향 a = ord(nameField[j]) b = ord(nameList[j]) if b-a <= 13 : answer += b-a else : answer += 26 - (b-a) nameField[j] = nameList[j] if nameField == nameList : break answer +=1 for j in range(Len-1,i,-1): #역방향 a = ord(nameField[j]) b = ord(nameList[j]) if b-a <= 13 : answer += b-a else : answer += 26 - (b-a) nameField[j] = nameList[j] if nameField == nameList : break answer +=1 dap = min(dap,answer) return dap ''' 중복코드로 많아 함수로 빼고싶었지만..패쓰! 정방향의 가중치와 정방향으로 0~길이/2 만큼까지 가고 + 역방향 가면서 원하는 name만들어졌는지 계속 체크! 최소가중치를 구해서 출력!! '''
normal
{ "blob_id": "8766003a85b1ed83927988df147b0b3004cb91f9", "index": 7691, "step-1": "<mask token>\n", "step-2": "def solution(name):\n Len = len(name)\n nameList = [name[i] for i in range(Len)]\n nameField = ['A' for i in range(Len)]\n answer = 0\n for i in range(Len):\n a = ord(nameField[i])\n b = ord(nameList[i])\n if b - a <= 13:\n answer += b - a\n else:\n answer += 26 - (b - a)\n nameField[i] = nameList[i]\n if nameField == nameList:\n break\n answer += 1\n dap = answer\n t = int(Len / 2)\n for i in range(t):\n nameField = ['A' for i in range(Len)]\n answer = i\n for j in range(i + 1):\n a = ord(nameField[j])\n b = ord(nameList[j])\n if b - a <= 13:\n answer += b - a\n else:\n answer += 26 - (b - a)\n nameField[j] = nameList[j]\n if nameField == nameList:\n break\n answer += 1\n for j in range(Len - 1, i, -1):\n a = ord(nameField[j])\n b = ord(nameList[j])\n if b - a <= 13:\n answer += b - a\n else:\n answer += 26 - (b - a)\n nameField[j] = nameList[j]\n if nameField == nameList:\n break\n answer += 1\n dap = min(dap, answer)\n return dap\n\n\n<mask token>\n", "step-3": "def solution(name):\n Len = len(name)\n nameList = [name[i] for i in range(Len)]\n nameField = ['A' for i in range(Len)]\n answer = 0\n \n # 정방향\n for i in range(Len):\n a = ord(nameField[i])\n b = ord(nameList[i])\n if b-a <= 13 : # 절반 이하면 그냥 더하고\n answer += b-a\n else : # 절반 넘으면 26에서 빼기\n answer += 26 - (b-a)\n \n nameField[i] = nameList[i] # name \"A\"가 들어간게 있을수도 있으니\n if nameField == nameList : # 값 바꿔주고 전체 체크!!\n break\n\n answer +=1 # 이동가중치 ++\n dap = answer\n \n # 정방향 + 역방향\n t = (int)(Len/2)\n for i in range(t): # 0~전체길이/2\n nameField = ['A' for i in range(Len)]\n answer = i\n \n for j in range(i+1): #정방향\n a = ord(nameField[j])\n b = ord(nameList[j])\n if b-a <= 13 :\n answer += b-a\n else :\n answer += 26 - (b-a)\n\n nameField[j] = nameList[j]\n if nameField == nameList :\n break\n\n answer +=1\n \n\n for j in range(Len-1,i,-1): #역방향\n a = ord(nameField[j])\n b = ord(nameList[j])\n if 
b-a <= 13 :\n answer += b-a\n else :\n answer += 26 - (b-a)\n\n nameField[j] = nameList[j]\n if nameField == nameList :\n break\n\n answer +=1\n \n dap = min(dap,answer)\n \n return dap\n\n\n'''\n중복코드로 많아 함수로 빼고싶었지만..패쓰!\n\n정방향의 가중치와\n정방향으로 0~길이/2 만큼까지 가고 + 역방향 가면서\n원하는 name만들어졌는지 계속 체크!\n\n최소가중치를 구해서 출력!!\n'''", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
import sys from pypsi.pipes import ThreadLocalStream from pypsi.shell import Shell from pypsi.core import pypsi_print from nose.tools import * class PypsiTestShell(Shell): pass class TestShellBootstrap(object): def setUp(self): self.real_stdout = sys.stdout self.real_stderr = sys.stderr self.real_stdin = sys.stdin self.real_print = print self.shell = PypsiTestShell() def tearDown(self): self.shell.restore() def test_bootstrap_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_bootstrap_stream_type, attr yield self._test_bootstrap_stream_instance, attr def _test_bootstrap_stream_type(self, attr): assert_is_instance(getattr(sys, attr), ThreadLocalStream) def _test_bootstrap_stream_instance(self, attr): assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self, 'real_' + attr)) def test_bootstrap_print(self): assert_equal(print, pypsi_print) def test_restore_print(self): self.shell.restore() assert_equal(print, self.real_print) def test_restore_streams(self): for attr in ('stdout', 'stderr', 'stdin'): yield self._test_restore_stream_type, attr yield self._test_restore_stream_instance, attr def _test_restore_stream_type(self, attr): self.shell.restore() assert_not_is_instance(getattr(sys, attr), ThreadLocalStream) def _test_restore_stream_instance(self, attr): self.shell.restore() assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))
normal
{ "blob_id": "1983340b3ce7ba8b631ba090871bea1ef7044943", "index": 9333, "step-1": "<mask token>\n\n\nclass TestShellBootstrap(object):\n <mask token>\n\n def tearDown(self):\n self.shell.restore()\n <mask token>\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n <mask token>\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n <mask token>\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-2": "<mask token>\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n <mask token>\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n 
assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-3": "<mask token>\n\n\nclass PypsiTestShell(Shell):\n pass\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_bootstrap_stream_instance(self, attr):\n assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self,\n 'real_' + attr))\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-4": "import sys\nfrom pypsi.pipes import ThreadLocalStream\nfrom pypsi.shell import Shell\nfrom pypsi.core import pypsi_print\nfrom nose.tools import *\n\n\nclass PypsiTestShell(Shell):\n pass\n\n\nclass TestShellBootstrap(object):\n\n def setUp(self):\n self.real_stdout = sys.stdout\n self.real_stderr = sys.stderr\n self.real_stdin = sys.stdin\n self.real_print = print\n self.shell = PypsiTestShell()\n\n def tearDown(self):\n self.shell.restore()\n\n def test_bootstrap_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield 
self._test_bootstrap_stream_type, attr\n yield self._test_bootstrap_stream_instance, attr\n\n def _test_bootstrap_stream_type(self, attr):\n assert_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_bootstrap_stream_instance(self, attr):\n assert_equal(getattr(sys, attr)._get_target_stream(), getattr(self,\n 'real_' + attr))\n\n def test_bootstrap_print(self):\n assert_equal(print, pypsi_print)\n\n def test_restore_print(self):\n self.shell.restore()\n assert_equal(print, self.real_print)\n\n def test_restore_streams(self):\n for attr in ('stdout', 'stderr', 'stdin'):\n yield self._test_restore_stream_type, attr\n yield self._test_restore_stream_instance, attr\n\n def _test_restore_stream_type(self, attr):\n self.shell.restore()\n assert_not_is_instance(getattr(sys, attr), ThreadLocalStream)\n\n def _test_restore_stream_instance(self, attr):\n self.shell.restore()\n assert_equal(getattr(sys, attr), getattr(self, 'real_' + attr))\n", "step-5": null, "step-ids": [ 7, 10, 12, 13 ] }
[ 7, 10, 12, 13 ]
from selenium import webdriver from time import sleep import os.path import time import datetime driver =webdriver.Chrome(executable_path=r'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe') counter=0 while True : driver.get("https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0") start='C://Users//Pathak//Downloads//chromedriver_win32' df=str(counter); gh=str(time.time()) ft=df+gh+'.png' final=os.path.join(start,ft) driver.get_screenshot_as_file(final) counter+=1 sleep(20) driver.quit()
normal
{ "blob_id": "30e7fc169eceb3d8cc1a4fa6bb65d81a4403f2c7", "index": 5800, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile True:\n driver.get(\n 'https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0'\n )\n start = 'C://Users//Pathak//Downloads//chromedriver_win32'\n df = str(counter)\n gh = str(time.time())\n ft = df + gh + '.png'\n final = os.path.join(start, ft)\n driver.get_screenshot_as_file(final)\n counter += 1\n sleep(20)\ndriver.quit()\n", "step-3": "<mask token>\ndriver = webdriver.Chrome(executable_path=\n 'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')\ncounter = 0\nwhile True:\n driver.get(\n 'https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0'\n )\n start = 'C://Users//Pathak//Downloads//chromedriver_win32'\n df = str(counter)\n gh = str(time.time())\n ft = df + gh + '.png'\n final = os.path.join(start, ft)\n driver.get_screenshot_as_file(final)\n counter += 1\n sleep(20)\ndriver.quit()\n", "step-4": "from selenium import webdriver\nfrom time import sleep\nimport os.path\nimport time\nimport datetime\ndriver = webdriver.Chrome(executable_path=\n 'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')\ncounter = 0\nwhile True:\n driver.get(\n 'https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0'\n )\n start = 'C://Users//Pathak//Downloads//chromedriver_win32'\n df = str(counter)\n gh = str(time.time())\n ft = df + gh + '.png'\n final = os.path.join(start, ft)\n driver.get_screenshot_as_file(final)\n counter += 1\n sleep(20)\ndriver.quit()\n", "step-5": "from selenium import webdriver\r\nfrom time import sleep\r\nimport os.path\r\nimport time\r\nimport datetime\r\ndriver =webdriver.Chrome(executable_path=r'C:/Users/Pathak/Downloads/chromedriver_win32/chromedriver.exe')\r\ncounter=0\r\nwhile True 
:\r\n\t\r\n\r\n\tdriver.get(\"https://www.google.co.in/maps/@18.9967228,73.118955,21z/data=!5m1!1e1?hl=en&authuser=0\")\r\n\tstart='C://Users//Pathak//Downloads//chromedriver_win32'\r\n\tdf=str(counter);\r\n\tgh=str(time.time())\r\n\r\n\tft=df+gh+'.png'\r\n\tfinal=os.path.join(start,ft)\r\n\tdriver.get_screenshot_as_file(final) \r\n\tcounter+=1\r\n\t\r\n\tsleep(20)\r\n\r\ndriver.quit()\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import sys import os from configparser import ConfigParser import logging from mod_argparse import setup_cli from checkers.IndexFile import DocumentIndex, ProgressNoteIndex from checkers import source_files from utilities import write_to_file, strip # , write_to_db_isok # import pandas as pd logger = logging.getLogger(__name__) DOCUMENTS = 1 PROGRESS_NOTES = 2 DOC_TYPE = { DOCUMENTS: { 'file_type': 'Document', 'folder': 'Documents', 'class': DocumentIndex, 'log': 'd', 'dates': ['CREATED_TIMESTAMP', 'POST_DATE'], 'converters': {'FILENAME': strip, 'DISPLAY_DESC': strip, 'DOC_COMMENT': strip, 'DOCUMENT_TYPE': strip}, 'staff_field': 'displayname' }, PROGRESS_NOTES: { 'file_type': 'ProgressNote', 'folder': 'ProgressNotes', 'class': ProgressNoteIndex, 'log': 'p', 'dates': ['CREATED_TIMESTAMP'], 'converters': {}, 'staff_field': 'login' } } def folders_with_documents(pat_ids, main_dir_name, doc_prog_folder): str_pat_ids = [str(pat_id) for pat_id in pat_ids] str_pat_folder_names = [os.path.join(main_dir_name, os.path.join(str_pat_id, doc_prog_folder)) for str_pat_id in str_pat_ids] # Patients/*/Documents for pid, folder in zip(str_pat_ids, str_pat_folder_names): if os.path.isdir(folder) and len(os.listdir(folder)) > 1: yield pid, folder # # def write_db_isok(list, pid): # df = pd.concat(list) # return write_to_db_isok(df, pid) def df_chunksof_100(df): lendf = len(df) top100 = df.iloc[:, :100] df = df.iloc[:, 100:] yield top100, df # # def indxfiles_todb(config, pat_ids, main_dir_name, doctype_info): # idx_chkr_cls = doctype_info['class'].initialize(config) # log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv' # # # df = pd.DataFrame() # list_ = [] # pid_start = 0 # pid_end = 0 # df_rows = 0 # # https://stackoverflow.com/questions/50689082/to-sql-pyodbc-count-field-incorrect-or-syntax-error # skipped_list = [] # for pid, pid_src_folder in folders_with_documents(pat_ids, main_dir_name, doctype_info['folder']): # index_file_path = 
os.path.join(pid_src_folder, indx_filename) # #int_pid = int(pid) # pid_end = int(pid) # try: # df = pd.read_csv(index_file_path, index_col=None, header=0, # converters=doctype_info['converters'], # # date_parser=pd.core.tools.datetimes.to_datetime, # # parse_dates=doctype_info['dates'] # ) # df['PID'] = pid # # #len_df = len(df) # # # if len_df > 50: # # df_1 = pd.concat(list_) # # if write_to_db_isok(df_1, f'{pid_start} - {pid_end}'): # # logger.info(f'Wrote to DB..upto pid: {pid} dfrows: {df_rows}') # # else: # # logger.error(f'Skipping batch because of errors : {pid_start} - {pid_end} dfrows: {df_rows} ') # # skipped_list.extend(range(pid_start, pid_end)) # # pid_start = pid_end + 1 # # # # else: # # list_.append(df) # # df_rows += len_df # # list_.append(df) # df_rows += len(df) # if len(list_) > 5 or df_rows > 30: # df = pd.concat(list_) # if write_to_db_isok(df, f'{pid_start} - {pid_end}'): # logger.info(f'Wrote to DB..upto pid: {pid} dfrows: {df_rows}') # else: # logger.error(f'Skipping batch because of errors : {pid_start} - {pid_end} dfrows: {df_rows} ') # skipped_list.extend(range(pid_start, pid_end)) # list_ = [] # pid_start = pid_end+1 # df_rows = 0 # # except pd.errors.ParserError as pe: # logger.error(f"Something went wrong (ParseError) pid:{pid}. 
skipping..") # list_ = [] # pid_start = pid_end + 1 # df_rows = 0 # continue # # if list_: # df = pd.concat(list_) # if write_to_db_isok(df, {pid_start} - {pid_end}): # logger.info(f'Wrote to DB..upto pid: {pid}') # # logger.warning(f'Skipped list of pids: {",".join([str(i) for i in skipped_list])}') def getvalid_src_dest_filepaths(config, pat_ids, main_dir_name, doctype_info): idx_chkr_cls = doctype_info['class'].initialize(config) log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv' for pid, pid_src_folder in folders_with_documents(pat_ids, main_dir_name, doctype_info['folder']): index_file_path = os.path.join(pid_src_folder, indx_filename) idxfile_data, built_src_filenames = idx_chkr_cls.get_idxfile_data(pid, index_file_path) if not idxfile_data: logger.debug(f'{pid}{log} | Issue(s) with IndexFile. Skipping..') continue not_in_indexbuilt_files, not_in_folderlisting = source_files.validate(pid, built_src_filenames, pid_src_folder, log, indx_filename) if any([*not_in_indexbuilt_files, *not_in_folderlisting]): logger.debug(f'{pid}{log} | invalid folder. skipping') continue # . . . . . . . . . . . folder listing should match the file names built from index file . . . . . . . . 
for idxfile_row, built_src_file in zip(idxfile_data, built_src_filenames): destn_filename = idx_chkr_cls.get_destination_filename(idxfile_row) if not destn_filename: logger.error(f'{pid}{log} | Could not form the destination filename string') continue logger.info(f'{pid}{log} | src: {built_src_file} dest:{destn_filename}') yield pid, built_src_file, destn_filename def main(): config = setup_cli(sys.argv[1:]) logger.info(f'{sys.argv[0]} -f {config.file_type} -s {config.start_range}' f' -e {config.end_range} -d {config.base_directory}') docinfo = DOC_TYPE[config.file_type] main_dir_name = os.path.join(config.base_directory, 'Patients') results_doc = os.path.join(os.path.join(config.base_directory, 'results'), docinfo['folder']) logger.debug(f'results doc : {results_doc}') parser = ConfigParser() parser.read('./config/settings.indexfile.ini') outfile = os.path.join('output', f"{docinfo['folder']}.paths_pids.{config.start_range}-{config.end_range}.csv") if not os.path.isfile(outfile): with open(outfile, 'w', newline='') as csv_file: write_to_file("pid", "src", "dest", csv_file, "size") #indxfiles_todb(parser, range(config.start_range, config.end_range), # main_dir_name, doctype_info) with open(outfile, 'a', newline='') as csv_file: for p, s, d in getvalid_src_dest_filepaths(parser, range(config.start_range, config.end_range), main_dir_name, docinfo): full_src_path = f"{main_dir_name}{os.sep}{p}{os.sep}{docinfo['folder']}{os.sep}{s}" write_to_file(p, s, d, csv_file, os.path.getsize(full_src_path)) if __name__ == "__main__": main() # from checkers import sanitize # print(sanitize.fix_altdoctitle('None')) # print(sanitize.fix_doctitle('None')) # print(sanitize.fix_timestamp('None')) # print(sanitize.fix_pid('None')) # print(sanitize.fix_fileno('None')) # print(sanitize.fix_ext('...')) # print(sanitize.fix_altdoctitle(None)) # print(sanitize.fix_doctitle(None)) # print(sanitize.fix_timestamp(None)) # print(sanitize.fix_pid(None)) # print(sanitize.fix_fileno(None)) # 
print(sanitize.fix_ext('aa')) # print(sanitize.fix_ext('asd')) # print(sanitize.fix_ext('bbbb')) # print(sanitize.fix_ext('ccccc')) # print(sanitize.fix_pid('asdf')) # print(sanitize.fix_fileno(3.4))
normal
{ "blob_id": "e38ae7f91deed1be00e60b7516210ea1feefe23e", "index": 285, "step-1": "<mask token>\n\n\ndef folders_with_documents(pat_ids, main_dir_name, doc_prog_folder):\n str_pat_ids = [str(pat_id) for pat_id in pat_ids]\n str_pat_folder_names = [os.path.join(main_dir_name, os.path.join(\n str_pat_id, doc_prog_folder)) for str_pat_id in str_pat_ids]\n for pid, folder in zip(str_pat_ids, str_pat_folder_names):\n if os.path.isdir(folder) and len(os.listdir(folder)) > 1:\n yield pid, folder\n\n\ndef df_chunksof_100(df):\n lendf = len(df)\n top100 = df.iloc[:, :100]\n df = df.iloc[:, 100:]\n yield top100, df\n\n\ndef getvalid_src_dest_filepaths(config, pat_ids, main_dir_name, doctype_info):\n idx_chkr_cls = doctype_info['class'].initialize(config)\n log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv'\n for pid, pid_src_folder in folders_with_documents(pat_ids,\n main_dir_name, doctype_info['folder']):\n index_file_path = os.path.join(pid_src_folder, indx_filename)\n idxfile_data, built_src_filenames = idx_chkr_cls.get_idxfile_data(pid,\n index_file_path)\n if not idxfile_data:\n logger.debug(f'{pid}{log} | Issue(s) with IndexFile. Skipping..')\n continue\n not_in_indexbuilt_files, not_in_folderlisting = source_files.validate(\n pid, built_src_filenames, pid_src_folder, log, indx_filename)\n if any([*not_in_indexbuilt_files, *not_in_folderlisting]):\n logger.debug(f'{pid}{log} | invalid folder. 
skipping')\n continue\n for idxfile_row, built_src_file in zip(idxfile_data,\n built_src_filenames):\n destn_filename = idx_chkr_cls.get_destination_filename(idxfile_row)\n if not destn_filename:\n logger.error(\n f'{pid}{log} | Could not form the destination filename string'\n )\n continue\n logger.info(\n f'{pid}{log} | src: {built_src_file} dest:{destn_filename}')\n yield pid, built_src_file, destn_filename\n\n\ndef main():\n config = setup_cli(sys.argv[1:])\n logger.info(\n f'{sys.argv[0]} -f {config.file_type} -s {config.start_range} -e {config.end_range} -d {config.base_directory}'\n )\n docinfo = DOC_TYPE[config.file_type]\n main_dir_name = os.path.join(config.base_directory, 'Patients')\n results_doc = os.path.join(os.path.join(config.base_directory,\n 'results'), docinfo['folder'])\n logger.debug(f'results doc : {results_doc}')\n parser = ConfigParser()\n parser.read('./config/settings.indexfile.ini')\n outfile = os.path.join('output',\n f\"{docinfo['folder']}.paths_pids.{config.start_range}-{config.end_range}.csv\"\n )\n if not os.path.isfile(outfile):\n with open(outfile, 'w', newline='') as csv_file:\n write_to_file('pid', 'src', 'dest', csv_file, 'size')\n with open(outfile, 'a', newline='') as csv_file:\n for p, s, d in getvalid_src_dest_filepaths(parser, range(config.\n start_range, config.end_range), main_dir_name, docinfo):\n full_src_path = (\n f\"{main_dir_name}{os.sep}{p}{os.sep}{docinfo['folder']}{os.sep}{s}\"\n )\n write_to_file(p, s, d, csv_file, os.path.getsize(full_src_path))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef folders_with_documents(pat_ids, main_dir_name, doc_prog_folder):\n str_pat_ids = [str(pat_id) for pat_id in pat_ids]\n str_pat_folder_names = [os.path.join(main_dir_name, os.path.join(\n str_pat_id, doc_prog_folder)) for str_pat_id in str_pat_ids]\n for pid, folder in zip(str_pat_ids, str_pat_folder_names):\n if os.path.isdir(folder) and len(os.listdir(folder)) > 1:\n yield pid, folder\n\n\ndef 
df_chunksof_100(df):\n lendf = len(df)\n top100 = df.iloc[:, :100]\n df = df.iloc[:, 100:]\n yield top100, df\n\n\ndef getvalid_src_dest_filepaths(config, pat_ids, main_dir_name, doctype_info):\n idx_chkr_cls = doctype_info['class'].initialize(config)\n log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv'\n for pid, pid_src_folder in folders_with_documents(pat_ids,\n main_dir_name, doctype_info['folder']):\n index_file_path = os.path.join(pid_src_folder, indx_filename)\n idxfile_data, built_src_filenames = idx_chkr_cls.get_idxfile_data(pid,\n index_file_path)\n if not idxfile_data:\n logger.debug(f'{pid}{log} | Issue(s) with IndexFile. Skipping..')\n continue\n not_in_indexbuilt_files, not_in_folderlisting = source_files.validate(\n pid, built_src_filenames, pid_src_folder, log, indx_filename)\n if any([*not_in_indexbuilt_files, *not_in_folderlisting]):\n logger.debug(f'{pid}{log} | invalid folder. skipping')\n continue\n for idxfile_row, built_src_file in zip(idxfile_data,\n built_src_filenames):\n destn_filename = idx_chkr_cls.get_destination_filename(idxfile_row)\n if not destn_filename:\n logger.error(\n f'{pid}{log} | Could not form the destination filename string'\n )\n continue\n logger.info(\n f'{pid}{log} | src: {built_src_file} dest:{destn_filename}')\n yield pid, built_src_file, destn_filename\n\n\ndef main():\n config = setup_cli(sys.argv[1:])\n logger.info(\n f'{sys.argv[0]} -f {config.file_type} -s {config.start_range} -e {config.end_range} -d {config.base_directory}'\n )\n docinfo = DOC_TYPE[config.file_type]\n main_dir_name = os.path.join(config.base_directory, 'Patients')\n results_doc = os.path.join(os.path.join(config.base_directory,\n 'results'), docinfo['folder'])\n logger.debug(f'results doc : {results_doc}')\n parser = ConfigParser()\n parser.read('./config/settings.indexfile.ini')\n outfile = os.path.join('output',\n f\"{docinfo['folder']}.paths_pids.{config.start_range}-{config.end_range}.csv\"\n )\n if not 
os.path.isfile(outfile):\n with open(outfile, 'w', newline='') as csv_file:\n write_to_file('pid', 'src', 'dest', csv_file, 'size')\n with open(outfile, 'a', newline='') as csv_file:\n for p, s, d in getvalid_src_dest_filepaths(parser, range(config.\n start_range, config.end_range), main_dir_name, docinfo):\n full_src_path = (\n f\"{main_dir_name}{os.sep}{p}{os.sep}{docinfo['folder']}{os.sep}{s}\"\n )\n write_to_file(p, s, d, csv_file, os.path.getsize(full_src_path))\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nlogger = logging.getLogger(__name__)\nDOCUMENTS = 1\nPROGRESS_NOTES = 2\nDOC_TYPE = {DOCUMENTS: {'file_type': 'Document', 'folder': 'Documents',\n 'class': DocumentIndex, 'log': 'd', 'dates': ['CREATED_TIMESTAMP',\n 'POST_DATE'], 'converters': {'FILENAME': strip, 'DISPLAY_DESC': strip,\n 'DOC_COMMENT': strip, 'DOCUMENT_TYPE': strip}, 'staff_field':\n 'displayname'}, PROGRESS_NOTES: {'file_type': 'ProgressNote', 'folder':\n 'ProgressNotes', 'class': ProgressNoteIndex, 'log': 'p', 'dates': [\n 'CREATED_TIMESTAMP'], 'converters': {}, 'staff_field': 'login'}}\n\n\ndef folders_with_documents(pat_ids, main_dir_name, doc_prog_folder):\n str_pat_ids = [str(pat_id) for pat_id in pat_ids]\n str_pat_folder_names = [os.path.join(main_dir_name, os.path.join(\n str_pat_id, doc_prog_folder)) for str_pat_id in str_pat_ids]\n for pid, folder in zip(str_pat_ids, str_pat_folder_names):\n if os.path.isdir(folder) and len(os.listdir(folder)) > 1:\n yield pid, folder\n\n\ndef df_chunksof_100(df):\n lendf = len(df)\n top100 = df.iloc[:, :100]\n df = df.iloc[:, 100:]\n yield top100, df\n\n\ndef getvalid_src_dest_filepaths(config, pat_ids, main_dir_name, doctype_info):\n idx_chkr_cls = doctype_info['class'].initialize(config)\n log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv'\n for pid, pid_src_folder in folders_with_documents(pat_ids,\n main_dir_name, doctype_info['folder']):\n index_file_path = os.path.join(pid_src_folder, 
indx_filename)\n idxfile_data, built_src_filenames = idx_chkr_cls.get_idxfile_data(pid,\n index_file_path)\n if not idxfile_data:\n logger.debug(f'{pid}{log} | Issue(s) with IndexFile. Skipping..')\n continue\n not_in_indexbuilt_files, not_in_folderlisting = source_files.validate(\n pid, built_src_filenames, pid_src_folder, log, indx_filename)\n if any([*not_in_indexbuilt_files, *not_in_folderlisting]):\n logger.debug(f'{pid}{log} | invalid folder. skipping')\n continue\n for idxfile_row, built_src_file in zip(idxfile_data,\n built_src_filenames):\n destn_filename = idx_chkr_cls.get_destination_filename(idxfile_row)\n if not destn_filename:\n logger.error(\n f'{pid}{log} | Could not form the destination filename string'\n )\n continue\n logger.info(\n f'{pid}{log} | src: {built_src_file} dest:{destn_filename}')\n yield pid, built_src_file, destn_filename\n\n\ndef main():\n config = setup_cli(sys.argv[1:])\n logger.info(\n f'{sys.argv[0]} -f {config.file_type} -s {config.start_range} -e {config.end_range} -d {config.base_directory}'\n )\n docinfo = DOC_TYPE[config.file_type]\n main_dir_name = os.path.join(config.base_directory, 'Patients')\n results_doc = os.path.join(os.path.join(config.base_directory,\n 'results'), docinfo['folder'])\n logger.debug(f'results doc : {results_doc}')\n parser = ConfigParser()\n parser.read('./config/settings.indexfile.ini')\n outfile = os.path.join('output',\n f\"{docinfo['folder']}.paths_pids.{config.start_range}-{config.end_range}.csv\"\n )\n if not os.path.isfile(outfile):\n with open(outfile, 'w', newline='') as csv_file:\n write_to_file('pid', 'src', 'dest', csv_file, 'size')\n with open(outfile, 'a', newline='') as csv_file:\n for p, s, d in getvalid_src_dest_filepaths(parser, range(config.\n start_range, config.end_range), main_dir_name, docinfo):\n full_src_path = (\n f\"{main_dir_name}{os.sep}{p}{os.sep}{docinfo['folder']}{os.sep}{s}\"\n )\n write_to_file(p, s, d, csv_file, os.path.getsize(full_src_path))\n\n\nif __name__ == 
'__main__':\n main()\n", "step-4": "import sys\nimport os\nfrom configparser import ConfigParser\nimport logging\nfrom mod_argparse import setup_cli\nfrom checkers.IndexFile import DocumentIndex, ProgressNoteIndex\nfrom checkers import source_files\nfrom utilities import write_to_file, strip\nlogger = logging.getLogger(__name__)\nDOCUMENTS = 1\nPROGRESS_NOTES = 2\nDOC_TYPE = {DOCUMENTS: {'file_type': 'Document', 'folder': 'Documents',\n 'class': DocumentIndex, 'log': 'd', 'dates': ['CREATED_TIMESTAMP',\n 'POST_DATE'], 'converters': {'FILENAME': strip, 'DISPLAY_DESC': strip,\n 'DOC_COMMENT': strip, 'DOCUMENT_TYPE': strip}, 'staff_field':\n 'displayname'}, PROGRESS_NOTES: {'file_type': 'ProgressNote', 'folder':\n 'ProgressNotes', 'class': ProgressNoteIndex, 'log': 'p', 'dates': [\n 'CREATED_TIMESTAMP'], 'converters': {}, 'staff_field': 'login'}}\n\n\ndef folders_with_documents(pat_ids, main_dir_name, doc_prog_folder):\n str_pat_ids = [str(pat_id) for pat_id in pat_ids]\n str_pat_folder_names = [os.path.join(main_dir_name, os.path.join(\n str_pat_id, doc_prog_folder)) for str_pat_id in str_pat_ids]\n for pid, folder in zip(str_pat_ids, str_pat_folder_names):\n if os.path.isdir(folder) and len(os.listdir(folder)) > 1:\n yield pid, folder\n\n\ndef df_chunksof_100(df):\n lendf = len(df)\n top100 = df.iloc[:, :100]\n df = df.iloc[:, 100:]\n yield top100, df\n\n\ndef getvalid_src_dest_filepaths(config, pat_ids, main_dir_name, doctype_info):\n idx_chkr_cls = doctype_info['class'].initialize(config)\n log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv'\n for pid, pid_src_folder in folders_with_documents(pat_ids,\n main_dir_name, doctype_info['folder']):\n index_file_path = os.path.join(pid_src_folder, indx_filename)\n idxfile_data, built_src_filenames = idx_chkr_cls.get_idxfile_data(pid,\n index_file_path)\n if not idxfile_data:\n logger.debug(f'{pid}{log} | Issue(s) with IndexFile. 
Skipping..')\n continue\n not_in_indexbuilt_files, not_in_folderlisting = source_files.validate(\n pid, built_src_filenames, pid_src_folder, log, indx_filename)\n if any([*not_in_indexbuilt_files, *not_in_folderlisting]):\n logger.debug(f'{pid}{log} | invalid folder. skipping')\n continue\n for idxfile_row, built_src_file in zip(idxfile_data,\n built_src_filenames):\n destn_filename = idx_chkr_cls.get_destination_filename(idxfile_row)\n if not destn_filename:\n logger.error(\n f'{pid}{log} | Could not form the destination filename string'\n )\n continue\n logger.info(\n f'{pid}{log} | src: {built_src_file} dest:{destn_filename}')\n yield pid, built_src_file, destn_filename\n\n\ndef main():\n config = setup_cli(sys.argv[1:])\n logger.info(\n f'{sys.argv[0]} -f {config.file_type} -s {config.start_range} -e {config.end_range} -d {config.base_directory}'\n )\n docinfo = DOC_TYPE[config.file_type]\n main_dir_name = os.path.join(config.base_directory, 'Patients')\n results_doc = os.path.join(os.path.join(config.base_directory,\n 'results'), docinfo['folder'])\n logger.debug(f'results doc : {results_doc}')\n parser = ConfigParser()\n parser.read('./config/settings.indexfile.ini')\n outfile = os.path.join('output',\n f\"{docinfo['folder']}.paths_pids.{config.start_range}-{config.end_range}.csv\"\n )\n if not os.path.isfile(outfile):\n with open(outfile, 'w', newline='') as csv_file:\n write_to_file('pid', 'src', 'dest', csv_file, 'size')\n with open(outfile, 'a', newline='') as csv_file:\n for p, s, d in getvalid_src_dest_filepaths(parser, range(config.\n start_range, config.end_range), main_dir_name, docinfo):\n full_src_path = (\n f\"{main_dir_name}{os.sep}{p}{os.sep}{docinfo['folder']}{os.sep}{s}\"\n )\n write_to_file(p, s, d, csv_file, os.path.getsize(full_src_path))\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import sys\nimport os\nfrom configparser import ConfigParser\nimport logging\nfrom mod_argparse import setup_cli\nfrom checkers.IndexFile import 
DocumentIndex, ProgressNoteIndex\nfrom checkers import source_files\nfrom utilities import write_to_file, strip # , write_to_db_isok\n# import pandas as pd\n\nlogger = logging.getLogger(__name__)\nDOCUMENTS = 1\nPROGRESS_NOTES = 2\n\nDOC_TYPE = {\n DOCUMENTS: {\n 'file_type': 'Document',\n 'folder': 'Documents',\n 'class': DocumentIndex,\n 'log': 'd',\n 'dates': ['CREATED_TIMESTAMP', 'POST_DATE'],\n 'converters': {'FILENAME': strip,\n 'DISPLAY_DESC': strip,\n 'DOC_COMMENT': strip,\n 'DOCUMENT_TYPE': strip},\n 'staff_field': 'displayname'\n },\n PROGRESS_NOTES: {\n 'file_type': 'ProgressNote',\n 'folder': 'ProgressNotes',\n 'class': ProgressNoteIndex,\n 'log': 'p',\n 'dates': ['CREATED_TIMESTAMP'],\n 'converters': {},\n 'staff_field': 'login'\n }\n}\n\n\ndef folders_with_documents(pat_ids, main_dir_name, doc_prog_folder):\n str_pat_ids = [str(pat_id) for pat_id in pat_ids]\n str_pat_folder_names = [os.path.join(main_dir_name, os.path.join(str_pat_id, doc_prog_folder))\n for str_pat_id in str_pat_ids] # Patients/*/Documents\n\n for pid, folder in zip(str_pat_ids, str_pat_folder_names):\n if os.path.isdir(folder) and len(os.listdir(folder)) > 1:\n yield pid, folder\n\n#\n# def write_db_isok(list, pid):\n# df = pd.concat(list)\n# return write_to_db_isok(df, pid)\n\n\ndef df_chunksof_100(df):\n lendf = len(df)\n\n top100 = df.iloc[:, :100]\n df = df.iloc[:, 100:]\n yield top100, df\n#\n# def indxfiles_todb(config, pat_ids, main_dir_name, doctype_info):\n# idx_chkr_cls = doctype_info['class'].initialize(config)\n# log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv'\n#\n# # df = pd.DataFrame()\n# list_ = []\n# pid_start = 0\n# pid_end = 0\n# df_rows = 0\n# # https://stackoverflow.com/questions/50689082/to-sql-pyodbc-count-field-incorrect-or-syntax-error\n# skipped_list = []\n# for pid, pid_src_folder in folders_with_documents(pat_ids, main_dir_name, doctype_info['folder']):\n# index_file_path = os.path.join(pid_src_folder, indx_filename)\n# #int_pid = 
int(pid)\n# pid_end = int(pid)\n# try:\n# df = pd.read_csv(index_file_path, index_col=None, header=0,\n# converters=doctype_info['converters'],\n# # date_parser=pd.core.tools.datetimes.to_datetime,\n# # parse_dates=doctype_info['dates']\n# )\n# df['PID'] = pid\n#\n# #len_df = len(df)\n#\n# # if len_df > 50:\n# # df_1 = pd.concat(list_)\n# # if write_to_db_isok(df_1, f'{pid_start} - {pid_end}'):\n# # logger.info(f'Wrote to DB..upto pid: {pid} dfrows: {df_rows}')\n# # else:\n# # logger.error(f'Skipping batch because of errors : {pid_start} - {pid_end} dfrows: {df_rows} ')\n# # skipped_list.extend(range(pid_start, pid_end))\n# # pid_start = pid_end + 1\n# #\n# # else:\n# # list_.append(df)\n# # df_rows += len_df\n#\n# list_.append(df)\n# df_rows += len(df)\n# if len(list_) > 5 or df_rows > 30:\n# df = pd.concat(list_)\n# if write_to_db_isok(df, f'{pid_start} - {pid_end}'):\n# logger.info(f'Wrote to DB..upto pid: {pid} dfrows: {df_rows}')\n# else:\n# logger.error(f'Skipping batch because of errors : {pid_start} - {pid_end} dfrows: {df_rows} ')\n# skipped_list.extend(range(pid_start, pid_end))\n# list_ = []\n# pid_start = pid_end+1\n# df_rows = 0\n#\n# except pd.errors.ParserError as pe:\n# logger.error(f\"Something went wrong (ParseError) pid:{pid}. 
skipping..\")\n# list_ = []\n# pid_start = pid_end + 1\n# df_rows = 0\n# continue\n#\n# if list_:\n# df = pd.concat(list_)\n# if write_to_db_isok(df, {pid_start} - {pid_end}):\n# logger.info(f'Wrote to DB..upto pid: {pid}')\n#\n# logger.warning(f'Skipped list of pids: {\",\".join([str(i) for i in skipped_list])}')\n\n\ndef getvalid_src_dest_filepaths(config, pat_ids, main_dir_name, doctype_info):\n idx_chkr_cls = doctype_info['class'].initialize(config)\n log, indx_filename = doctype_info['log'], idx_chkr_cls.__name__ + '.csv'\n\n for pid, pid_src_folder in folders_with_documents(pat_ids, main_dir_name, doctype_info['folder']):\n index_file_path = os.path.join(pid_src_folder, indx_filename)\n idxfile_data, built_src_filenames = idx_chkr_cls.get_idxfile_data(pid, index_file_path)\n if not idxfile_data:\n logger.debug(f'{pid}{log} | Issue(s) with IndexFile. Skipping..')\n continue\n not_in_indexbuilt_files, not_in_folderlisting = source_files.validate(pid, built_src_filenames,\n pid_src_folder, log, indx_filename)\n if any([*not_in_indexbuilt_files, *not_in_folderlisting]):\n logger.debug(f'{pid}{log} | invalid folder. skipping')\n continue\n # . . . . . . . . . . . folder listing should match the file names built from index file . . . . . . . 
.\n for idxfile_row, built_src_file in zip(idxfile_data, built_src_filenames):\n destn_filename = idx_chkr_cls.get_destination_filename(idxfile_row)\n if not destn_filename:\n logger.error(f'{pid}{log} | Could not form the destination filename string')\n continue\n logger.info(f'{pid}{log} | src: {built_src_file} dest:{destn_filename}')\n yield pid, built_src_file, destn_filename\n\n\ndef main():\n config = setup_cli(sys.argv[1:])\n logger.info(f'{sys.argv[0]} -f {config.file_type} -s {config.start_range}'\n f' -e {config.end_range} -d {config.base_directory}')\n\n docinfo = DOC_TYPE[config.file_type]\n main_dir_name = os.path.join(config.base_directory, 'Patients')\n results_doc = os.path.join(os.path.join(config.base_directory, 'results'), docinfo['folder'])\n logger.debug(f'results doc : {results_doc}')\n\n parser = ConfigParser()\n parser.read('./config/settings.indexfile.ini')\n\n outfile = os.path.join('output', f\"{docinfo['folder']}.paths_pids.{config.start_range}-{config.end_range}.csv\")\n if not os.path.isfile(outfile):\n with open(outfile, 'w', newline='') as csv_file:\n write_to_file(\"pid\", \"src\", \"dest\", csv_file, \"size\")\n\n #indxfiles_todb(parser, range(config.start_range, config.end_range),\n # main_dir_name, doctype_info)\n\n with open(outfile, 'a', newline='') as csv_file:\n for p, s, d in getvalid_src_dest_filepaths(parser, range(config.start_range, config.end_range),\n main_dir_name, docinfo):\n\n full_src_path = f\"{main_dir_name}{os.sep}{p}{os.sep}{docinfo['folder']}{os.sep}{s}\"\n write_to_file(p, s, d, csv_file, os.path.getsize(full_src_path))\n\n\nif __name__ == \"__main__\":\n main()\n # from checkers import sanitize\n # print(sanitize.fix_altdoctitle('None'))\n # print(sanitize.fix_doctitle('None'))\n # print(sanitize.fix_timestamp('None'))\n # print(sanitize.fix_pid('None'))\n # print(sanitize.fix_fileno('None'))\n # print(sanitize.fix_ext('...'))\n\n # print(sanitize.fix_altdoctitle(None))\n # 
print(sanitize.fix_doctitle(None))\n # print(sanitize.fix_timestamp(None))\n # print(sanitize.fix_pid(None))\n # print(sanitize.fix_fileno(None))\n # print(sanitize.fix_ext('aa'))\n # print(sanitize.fix_ext('asd'))\n # print(sanitize.fix_ext('bbbb'))\n # print(sanitize.fix_ext('ccccc'))\n\n # print(sanitize.fix_pid('asdf'))\n # print(sanitize.fix_fileno(3.4))\n\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
""" dansfunctions - various useful functions in python usage: >>import dansfunctions >>dansfunctions.fg # module of general mathematical, vector and string format functions >>dansfunctions.fp # module of matplotlib shortcuts >>dansfunctions.widgets # module of tkinter shortcuts Requirements: numpy Optional requirements: matplotlib, tkinter """ from . import functions_general as fg try: import matplotlib matplotlib.use('TkAgg') from . import functions_plotting as fp except ImportError: fp = None print('Matplotlib may not be available') try: from .tkgui import basic_widgets as widgets except ImportError: widgets = None print('tkinter may not be available') def version_info(): return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__) def module_info(): import sys out = 'Python version %s' % sys.version out += '\n%s' % version_info() # Modules out += '\n numpy version: %s' % fg.np.__version__ try: import matplotlib out += '\nmatplotlib version: %s' % matplotlib.__version__ except ImportError: out += '\nmatplotlib version: None' try: import tkinter out += '\n tkinter version: %s' % tkinter.TkVersion except ImportError: out += '\n tkinter version: None' return out def check_general_functions(): print('dansfunctions/functions_general.py') print('Version: %s (%s)' % (fg.__version__, fg.__date__)) print('Methods:') print(fg.list_methods(fg, False)) def check_plotting_functions(): print('dansfunctions/functions_plotting.py') if fp is None: print('Matplotlib may not be available') return print('Version: %s (%s)' % (fp.__version__, fp.__date__)) print('Methods:') print(fg.list_methods(fp, False)) def check_tkinter_functions(): print('dansfunctions/tkgui/basic_widgets.py') if widgets is None: print('tkinter may not be available') return print('Version: %s (%s)' % (widgets.__version__, widgets.__date__)) print('Methods:') print(fg.list_methods(widgets, False))
normal
{ "blob_id": "0f266db39988cfce475380036f4f4f5b1a1fee1a", "index": 3647, "step-1": "<mask token>\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\n<mask token>\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-2": "<mask token>\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n 
print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-3": "<mask token>\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-4": "<mask 
token>\nfrom . import functions_general as fg\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n out += \"\"\"\n numpy version: %s\"\"\" % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-5": "\"\"\"\ndansfunctions - various useful functions in python\nusage:\n>>import dansfunctions\n>>dansfunctions.fg # module of general mathematical, vector and string format functions\n>>dansfunctions.fp # module of matplotlib shortcuts\n>>dansfunctions.widgets # module of tkinter shortcuts\n\nRequirements: 
numpy\nOptional requirements: matplotlib, tkinter\n\"\"\"\n\nfrom . import functions_general as fg\n\ntry:\n import matplotlib\n matplotlib.use('TkAgg')\n from . import functions_plotting as fp\nexcept ImportError:\n fp = None\n print('Matplotlib may not be available')\n\ntry:\n from .tkgui import basic_widgets as widgets\nexcept ImportError:\n widgets = None\n print('tkinter may not be available')\n\n\ndef version_info():\n return 'dansfunctions version %s (%s)' % (fg.__version__, fg.__date__)\n\n\ndef module_info():\n import sys\n out = 'Python version %s' % sys.version\n out += '\\n%s' % version_info()\n # Modules\n out += '\\n numpy version: %s' % fg.np.__version__\n try:\n import matplotlib\n out += '\\nmatplotlib version: %s' % matplotlib.__version__\n except ImportError:\n out += '\\nmatplotlib version: None'\n try:\n import tkinter\n out += '\\n tkinter version: %s' % tkinter.TkVersion\n except ImportError:\n out += '\\n tkinter version: None'\n return out\n\n\ndef check_general_functions():\n print('dansfunctions/functions_general.py')\n print('Version: %s (%s)' % (fg.__version__, fg.__date__))\n print('Methods:')\n print(fg.list_methods(fg, False))\n\n\ndef check_plotting_functions():\n print('dansfunctions/functions_plotting.py')\n if fp is None:\n print('Matplotlib may not be available')\n return\n print('Version: %s (%s)' % (fp.__version__, fp.__date__))\n print('Methods:')\n print(fg.list_methods(fp, False))\n\n\ndef check_tkinter_functions():\n print('dansfunctions/tkgui/basic_widgets.py')\n if widgets is None:\n print('tkinter may not be available')\n return\n print('Version: %s (%s)' % (widgets.__version__, widgets.__date__))\n print('Methods:')\n print(fg.list_methods(widgets, False))\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer from sklearn.feature_selection import SelectKBest, chi2 from sklearn import metrics, ensemble, linear_model, svm from numpy import log, ones, array, zeros, mean, std, repeat import numpy as np import scipy.sparse as sp import re import csv from time import time import functools from nltk.util import skipgrams from nltk.stem import WordNetLemmatizer from nltk.stem import PorterStemmer from nltk.tokenize import word_tokenize DIR_PATH = "" TRAIN_FILE = DIR_PATH + "train.csv" TEST_SOL_FILE = DIR_PATH + "test_with_solutions.csv" # This is also used for training, together with TRAIN_FILE BADWORDS_FILE = DIR_PATH + "bad_words.txt" # attached with submission TEST_FILE = DIR_PATH + "test.csv" # set this to the new test file name PREDICTION_FILE = DIR_PATH + "preds.csv" # predictions will be written here def normalize(f , lammatize= False): f = [x.lower() for x in f] f = [x.replace("\\n"," ") for x in f] f = [x.replace("\\t"," ") for x in f] f = [x.replace("\\xa0"," ") for x in f] f = [x.replace("\\xc2"," ") for x in f] #f = [x.replace(","," ").replace("."," ").replace(" ", " ") for x in f] #f = [re.subn(" ([a-z]) ","\\1", x)[0] for x in f] #f = [x.replace(" "," ") for x in f] f = [x.replace(" u "," you ") for x in f] f = [x.replace(" em "," them ") for x in f] f = [x.replace(" da "," the ") for x in f] f = [x.replace(" yo "," you ") for x in f] f = [x.replace(" ur "," you ") for x in f] #f = [x.replace(" ur "," your ") for x in f] #f = [x.replace(" ur "," you're ") for x in f] f = [x.replace("won't", "will not") for x in f] f = [x.replace("can't", "cannot") for x in f] f = [x.replace("i'm", "i am") for x in f] f = [x.replace(" im ", " i am ") for x in f] f = [x.replace("ain't", "is not") for x in f] f = [x.replace("'ll", " will") for x in f] f = [x.replace("'t", " not") for x in f] f = [x.replace("'ve", " have") for x in f] f = [x.replace("'s", " is") for x in f] f = [x.replace("'re", " are") for x 
in f] f = [x.replace("'d", " would") for x in f] #f = [x.replace("outta", "out of") for x in f] bwMap = loadBW() for key, value in bwMap.items(): kpad = " " + key + " " vpad = " " + value + " " f = [x.replace(kpad, vpad) for x in f] # stemming """ f = [re.subn("ies( |$)", "y ", x)[0].strip() for x in f] #f = [re.subn("([abcdefghijklmnopqrstuvwxyz])s( |$)", "\\1 ", x)[0].strip() for x in f] f = [re.subn("s( |$)", " ", x)[0].strip() for x in f] f = [re.subn("ing( |$)", " ", x)[0].strip() for x in f] f = [x.replace("tard ", " ") for x in f] f = [re.subn(" [*$%&#@][*$%&#@]+"," xexp ", x)[0].strip() for x in f] f = [re.subn(" [0-9]+ "," DD ", x)[0].strip() for x in f] f = [re.subn("<\S*>","", x)[0].strip() for x in f] """ tokenized_sents = [word_tokenize(i) for i in f] if not lammatize: stemmer = PorterStemmer() for i in range (0, len(tokenized_sents)): for j in range (0,len(tokenized_sents[i])): tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j]) else: lammatizer = WordNetLemmatizer() for i in range (0, len(tokenized_sents)): for j in range (0,len(tokenized_sents[i])): tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents[i][j]) for i in range (0, len(tokenized_sents)): f[i] = " ".join(tokenized_sents[i]) return f def ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features=500, binary = False, do_normalization = False, stopwords = False, verbose = True, analyzer_char = False): f = data if do_normalization: f = normalize(f) ftrain = f[:ntrain] ftest = f[ntrain:] y_train = labels[:ntrain] t0 = time() analyzer_type = 'word' if analyzer_char: analyzer_type = 'char' if binary: vectorizer = CountVectorizer(ngram_range = (min_ngrams , max_ngrams), binary =True) elif stopwords: vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),stop_words='english',analyzer=analyzer_type,sublinear_tf=True) else: vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),sublinear_tf=True,analyzer=analyzer_type) if verbose: print 
("extracting ngrams... where n is [%d,%d]" % (max_ngrams,min_ngrams)) X_train = vectorizer.fit_transform(ftrain) X_test = vectorizer.transform(ftest) if verbose: print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape) y = array(y_train) numFts = no_of_features if numFts < X_train.shape[1]: t0 = time() ch2 = SelectKBest(chi2, k=numFts) X_train = ch2.fit_transform(X_train, y) X_test = ch2.transform(X_test) assert sp.issparse(X_train) if verbose: print ("Extracting best features by a chi-squared test.. ", X_train.shape, X_test.shape ) return X_train, y, X_test def skipGrams(data, labels, ntrain,nm=500,min_ngrams=1, max_ngrams=1, no_of_features=500, do_normalization = False, verbose = True): f = data if do_normalization: f = normalize(f) ftrain = f[:ntrain] ftest = f[ntrain:] y_train = labels[:ntrain] t0 = time() skipper = functools.partial(skipgrams, n=2, k=3) vectorizer = TfidfVectorizer(sublinear_tf=True,analyzer=skipper) X_train = vectorizer.fit_transform(ftrain) X_test = vectorizer.transform(ftest) if verbose: print ("done in %fs" % (time() - t0), X_train.shape, X_test.shape) y = array(y_train) numFts = nm if numFts < X_train.shape[1]: t0 = time() ch2 = SelectKBest(chi2, k=numFts) X_train = ch2.fit_transform(X_train, y) X_test = ch2.transform(X_test) assert sp.issparse(X_train) if verbose: print ("Extracting best features by a chi-squared test.. 
", X_train.shape, X_test.shape) return X_train, y, X_test def specialCases(data, labels, ntrain, verbose = True): g = [x.lower().replace("you are"," SSS ").replace("you're"," SSS ").replace(" ur ", " SSS ").split("SSS")[1:] for x in data] f = [] for x in g: fts = " " x = normalize(x) for y in x: w = y.strip().replace("?",".").split(".") fts = fts + " " + w[0] f.append(fts) X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100, do_normalization = True, verbose = verbose) return X_trn, y_trn, X_tst def loadBW(): f = open(BADWORDS_FILE, "r") bwMap = dict() for line in f: sp = line.strip().lower().split(",") if len(sp) == 2: bwMap[sp[0].strip()] = sp[1].strip() return bwMap def readCsv(fname, skipFirst=True, delimiter = ","): reader = csv.reader(open(fname),delimiter=delimiter) rows = [] count = 1 for row in reader: if not skipFirst or count > 1: rows.append(row) count += 1 return rows def write_submission(x,filename): wtr = open(filename,"w") for i in range(len(x)): wtr.write(format(x[i],"0.10f")) wtr.write("\n") wtr.close() def run(verbose = True): t0 = time() train_data = readCsv(TRAIN_FILE) train2_data = readCsv(TEST_SOL_FILE) train_data = train_data + train2_data # print(train_data) labels = array([int(x[0]) for x in train_data]) # print(labels) train = [x[2] for x in train_data] test_data = readCsv(TEST_FILE) test_data = [x[2] for x in test_data] data = train + test_data n = len(data) ntrain = len(train) X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose) """ X_train1, y_train, X_test1 = ngrams(data, labels, ntrain, 1, 1, 2000, do_normalization = True, verbose = verbose) X_train2, y_train, X_test2 = ngrams(data, labels, ntrain, 2, 2, 4000, do_normalization = True, verbose = verbose) X_train3, y_train, X_test3 = ngrams(data, labels, ntrain, 3, 3, 100, do_normalization = True, verbose = verbose) X_train4, y_train, X_test4 = ngrams(data, labels, ntrain, 4, 4, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) 
X_train5, y_train, X_test5 = ngrams(data, labels, ntrain, 5, 5, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) X_train6, y_train, X_test6 = ngrams(data, labels, ntrain, 3, 3, 2000, do_normalization = True, verbose = verbose, analyzer_char = True) X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose) X_train8, y_train, X_test8 = skipGrams(data, labels, ntrain, verbose = verbose) X_tn = sp.hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8]) X_tt = sp.hstack([X_test1, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8]) if verbose: print "######## Total time for feature extraction: %fs" % (time() - t0), X_tn.shape, X_tt.shape predictions = runClassifiers(X_tn, labels, X_tt) write_submission(predictions, PREDICTION_FILE) print "Predictions written to:", PREDICTION_FILE """ run() #some code for n grams (use tdifvectorizer)
normal
{ "blob_id": "91eb0ae8e59f24aeefdabd46546bc8fb7a0b6f6c", "index": 3833, "step-1": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] 
= stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\n<mask token>\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n 
labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = 
[re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n 
wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f = [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() 
for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... 
where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\nrun()\n", "step-4": "<mask token>\nDIR_PATH = ''\nTRAIN_FILE = DIR_PATH + 'train.csv'\nTEST_SOL_FILE = DIR_PATH + 'test_with_solutions.csv'\nBADWORDS_FILE = DIR_PATH + 'bad_words.txt'\nTEST_FILE = DIR_PATH + 'test.csv'\nPREDICTION_FILE = DIR_PATH + 'preds.csv'\n\n\ndef normalize(f, lammatize=False):\n f = [x.lower() for x in f]\n f = [x.replace('\\\\n', ' ') for x in f]\n f 
= [x.replace('\\\\t', ' ') for x in f]\n f = [x.replace('\\\\xa0', ' ') for x in f]\n f = [x.replace('\\\\xc2', ' ') for x in f]\n f = [x.replace(' u ', ' you ') for x in f]\n f = [x.replace(' em ', ' them ') for x in f]\n f = [x.replace(' da ', ' the ') for x in f]\n f = [x.replace(' yo ', ' you ') for x in f]\n f = [x.replace(' ur ', ' you ') for x in f]\n f = [x.replace(\"won't\", 'will not') for x in f]\n f = [x.replace(\"can't\", 'cannot') for x in f]\n f = [x.replace(\"i'm\", 'i am') for x in f]\n f = [x.replace(' im ', ' i am ') for x in f]\n f = [x.replace(\"ain't\", 'is not') for x in f]\n f = [x.replace(\"'ll\", ' will') for x in f]\n f = [x.replace(\"'t\", ' not') for x in f]\n f = [x.replace(\"'ve\", ' have') for x in f]\n f = [x.replace(\"'s\", ' is') for x in f]\n f = [x.replace(\"'re\", ' are') for x in f]\n f = [x.replace(\"'d\", ' would') for x in f]\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = ' ' + key + ' '\n vpad = ' ' + value + ' '\n f = [x.replace(kpad, vpad) for x in f]\n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range(0, len(tokenized_sents)):\n for j in range(0, len(tokenized_sents[i])):\n tokenized_sents[i][j] = 
lammatizer.lemmatize(tokenized_sents\n [i][j])\n for i in range(0, len(tokenized_sents)):\n f[i] = ' '.join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features\n =500, binary=False, do_normalization=False, stopwords=False, verbose=\n True, analyzer_char=False):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n if binary:\n vectorizer = CountVectorizer(ngram_range=(min_ngrams, max_ngrams),\n binary=True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n stop_words='english', analyzer=analyzer_type, sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range=(min_ngrams, max_ngrams),\n sublinear_tf=True, analyzer=analyzer_type)\n if verbose:\n print('extracting ngrams... where n is [%d,%d]' % (max_ngrams,\n min_ngrams))\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. 
', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain, nm=500, min_ngrams=1, max_ngrams=1,\n no_of_features=500, do_normalization=False, verbose=True):\n f = data\n if do_normalization:\n f = normalize(f)\n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n vectorizer = TfidfVectorizer(sublinear_tf=True, analyzer=skipper)\n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n if verbose:\n print('done in %fs' % (time() - t0), X_train.shape, X_test.shape)\n y = array(y_train)\n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train)\n if verbose:\n print('Extracting best features by a chi-squared test.. ', X_train.\n shape, X_test.shape)\n return X_train, y, X_test\n\n\ndef specialCases(data, labels, ntrain, verbose=True):\n g = [x.lower().replace('you are', ' SSS ').replace(\"you're\", ' SSS ').\n replace(' ur ', ' SSS ').split('SSS')[1:] for x in data]\n f = []\n for x in g:\n fts = ' '\n x = normalize(x)\n for y in x:\n w = y.strip().replace('?', '.').split('.')\n fts = fts + ' ' + w[0]\n f.append(fts)\n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100,\n do_normalization=True, verbose=verbose)\n return X_trn, y_trn, X_tst\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, 'r')\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(',')\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n\ndef readCsv(fname, skipFirst=True, delimiter=','):\n reader = csv.reader(open(fname), delimiter=delimiter)\n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1:\n rows.append(row)\n count += 1\n return rows\n\n\ndef write_submission(x, filename):\n wtr = open(filename, 'w')\n for i in range(len(x)):\n 
wtr.write(format(x[i], '0.10f'))\n wtr.write('\\n')\n wtr.close()\n\n\ndef run(verbose=True):\n t0 = time()\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n train_data = train_data + train2_data\n labels = array([int(x[0]) for x in train_data])\n train = [x[2] for x in train_data]\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data]\n data = train + test_data\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose\n =verbose)\n\n\n<mask token>\nrun()\n", "step-5": "from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom sklearn import metrics, ensemble, linear_model, svm\nfrom numpy import log, ones, array, zeros, mean, std, repeat\nimport numpy as np\nimport scipy.sparse as sp\nimport re\nimport csv\nfrom time import time\nimport functools\nfrom nltk.util import skipgrams\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.stem import PorterStemmer\nfrom nltk.tokenize import word_tokenize\n\n\nDIR_PATH = \"\"\n\nTRAIN_FILE = DIR_PATH + \"train.csv\"\nTEST_SOL_FILE = DIR_PATH + \"test_with_solutions.csv\" # This is also used for training, together with TRAIN_FILE\nBADWORDS_FILE = DIR_PATH + \"bad_words.txt\" # attached with submission \n\nTEST_FILE = DIR_PATH + \"test.csv\" # set this to the new test file name\nPREDICTION_FILE = DIR_PATH + \"preds.csv\" # predictions will be written here \n\ndef normalize(f , lammatize= False):\n f = [x.lower() for x in f]\n f = [x.replace(\"\\\\n\",\" \") for x in f] \n f = [x.replace(\"\\\\t\",\" \") for x in f] \n f = [x.replace(\"\\\\xa0\",\" \") for x in f]\n f = [x.replace(\"\\\\xc2\",\" \") for x in f]\n\n #f = [x.replace(\",\",\" \").replace(\".\",\" \").replace(\" \", \" \") for x in f]\n #f = [re.subn(\" ([a-z]) \",\"\\\\1\", x)[0] for x in f] \n #f = [x.replace(\" \",\" \") for x in f]\n\n f = [x.replace(\" u \",\" you \") for x in f]\n 
f = [x.replace(\" em \",\" them \") for x in f]\n f = [x.replace(\" da \",\" the \") for x in f]\n f = [x.replace(\" yo \",\" you \") for x in f]\n f = [x.replace(\" ur \",\" you \") for x in f]\n #f = [x.replace(\" ur \",\" your \") for x in f]\n #f = [x.replace(\" ur \",\" you're \") for x in f]\n \n f = [x.replace(\"won't\", \"will not\") for x in f]\n f = [x.replace(\"can't\", \"cannot\") for x in f]\n f = [x.replace(\"i'm\", \"i am\") for x in f]\n f = [x.replace(\" im \", \" i am \") for x in f]\n f = [x.replace(\"ain't\", \"is not\") for x in f]\n f = [x.replace(\"'ll\", \" will\") for x in f]\n f = [x.replace(\"'t\", \" not\") for x in f]\n f = [x.replace(\"'ve\", \" have\") for x in f]\n f = [x.replace(\"'s\", \" is\") for x in f]\n f = [x.replace(\"'re\", \" are\") for x in f]\n f = [x.replace(\"'d\", \" would\") for x in f]\n\n #f = [x.replace(\"outta\", \"out of\") for x in f]\n\n bwMap = loadBW()\n for key, value in bwMap.items():\n kpad = \" \" + key + \" \"\n vpad = \" \" + value + \" \"\n f = [x.replace(kpad, vpad) for x in f]\n \n # stemming \n \"\"\"\n f = [re.subn(\"ies( |$)\", \"y \", x)[0].strip() for x in f]\n #f = [re.subn(\"([abcdefghijklmnopqrstuvwxyz])s( |$)\", \"\\\\1 \", x)[0].strip() for x in f]\n f = [re.subn(\"s( |$)\", \" \", x)[0].strip() for x in f]\n f = [re.subn(\"ing( |$)\", \" \", x)[0].strip() for x in f]\n f = [x.replace(\"tard \", \" \") for x in f]\n \n f = [re.subn(\" [*$%&#@][*$%&#@]+\",\" xexp \", x)[0].strip() for x in f]\n f = [re.subn(\" [0-9]+ \",\" DD \", x)[0].strip() for x in f]\n f = [re.subn(\"<\\S*>\",\"\", x)[0].strip() for x in f] \n \"\"\"\n tokenized_sents = [word_tokenize(i) for i in f]\n if not lammatize:\n stemmer = PorterStemmer()\n for i in range (0, len(tokenized_sents)):\n for j in range (0,len(tokenized_sents[i])):\n tokenized_sents[i][j] = stemmer.stem(tokenized_sents[i][j])\n else:\n lammatizer = WordNetLemmatizer()\n for i in range (0, len(tokenized_sents)):\n for j in range 
(0,len(tokenized_sents[i])):\n tokenized_sents[i][j] = lammatizer.lemmatize(tokenized_sents[i][j]) \n for i in range (0, len(tokenized_sents)):\n f[i] = \" \".join(tokenized_sents[i])\n return f\n\n\ndef ngrams(data, labels, ntrain, min_ngrams=1, max_ngrams=1, no_of_features=500, binary = False, do_normalization = False, stopwords = False, verbose = True, analyzer_char = False):\n f = data\n if do_normalization:\n f = normalize(f)\n \n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n \n t0 = time()\n analyzer_type = 'word'\n if analyzer_char:\n analyzer_type = 'char'\n \n if binary:\n vectorizer = CountVectorizer(ngram_range = (min_ngrams , max_ngrams), binary =True)\n elif stopwords:\n vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),stop_words='english',analyzer=analyzer_type,sublinear_tf=True)\n else:\n vectorizer = TfidfVectorizer(ngram_range = (min_ngrams , max_ngrams),sublinear_tf=True,analyzer=analyzer_type)\n\n if verbose:\n print (\"extracting ngrams... where n is [%d,%d]\" % (max_ngrams,min_ngrams))\n \n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n \n if verbose:\n print (\"done in %fs\" % (time() - t0), X_train.shape, X_test.shape)\n\n y = array(y_train) \n \n numFts = no_of_features\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train) \n\n if verbose:\n print (\"Extracting best features by a chi-squared test.. 
\", X_train.shape, X_test.shape ) \n return X_train, y, X_test\n\n\ndef skipGrams(data, labels, ntrain,nm=500,min_ngrams=1, max_ngrams=1, no_of_features=500, do_normalization = False, verbose = True):\n f = data\n if do_normalization:\n f = normalize(f)\n \n ftrain = f[:ntrain]\n ftest = f[ntrain:]\n y_train = labels[:ntrain]\n t0 = time()\n skipper = functools.partial(skipgrams, n=2, k=3)\n \n vectorizer = TfidfVectorizer(sublinear_tf=True,analyzer=skipper)\n \n X_train = vectorizer.fit_transform(ftrain)\n X_test = vectorizer.transform(ftest)\n \n if verbose:\n print (\"done in %fs\" % (time() - t0), X_train.shape, X_test.shape)\n\n y = array(y_train) \n \n numFts = nm\n if numFts < X_train.shape[1]:\n t0 = time()\n ch2 = SelectKBest(chi2, k=numFts)\n X_train = ch2.fit_transform(X_train, y)\n X_test = ch2.transform(X_test)\n assert sp.issparse(X_train) \n if verbose:\n print (\"Extracting best features by a chi-squared test.. \", X_train.shape, X_test.shape) \n return X_train, y, X_test\n\n\n\ndef specialCases(data, labels, ntrain, verbose = True):\n g = [x.lower().replace(\"you are\",\" SSS \").replace(\"you're\",\" SSS \").replace(\" ur \", \" SSS \").split(\"SSS\")[1:] for x in data]\n\n f = []\n for x in g:\n fts = \" \"\n x = normalize(x)\n for y in x:\n w = y.strip().replace(\"?\",\".\").split(\".\")\n fts = fts + \" \" + w[0] \n f.append(fts)\n \n X_trn, y_trn, X_tst = ngrams(f, labels, ntrain, 1, 1, 100, do_normalization = True, verbose = verbose)\n return X_trn, y_trn, X_tst\n\n\n\ndef loadBW():\n f = open(BADWORDS_FILE, \"r\")\n bwMap = dict()\n for line in f:\n sp = line.strip().lower().split(\",\")\n if len(sp) == 2:\n bwMap[sp[0].strip()] = sp[1].strip()\n return bwMap\n\n \n\ndef readCsv(fname, skipFirst=True, delimiter = \",\"):\n reader = csv.reader(open(fname),delimiter=delimiter)\n \n rows = []\n count = 1\n for row in reader:\n if not skipFirst or count > 1: \n rows.append(row)\n count += 1\n return rows\n\ndef write_submission(x,filename):\n 
wtr = open(filename,\"w\")\n for i in range(len(x)):\n wtr.write(format(x[i],\"0.10f\"))\n wtr.write(\"\\n\")\n wtr.close()\n\ndef run(verbose = True):\n t0 = time()\n\n train_data = readCsv(TRAIN_FILE)\n train2_data = readCsv(TEST_SOL_FILE)\n \n train_data = train_data + train2_data\n # print(train_data)\n labels = array([int(x[0]) for x in train_data])\n # print(labels) \n train = [x[2] for x in train_data]\n\n test_data = readCsv(TEST_FILE)\n test_data = [x[2] for x in test_data] \n \n data = train + test_data\n\n n = len(data)\n ntrain = len(train)\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)\n \n\"\"\"\n X_train1, y_train, X_test1 = ngrams(data, labels, ntrain, 1, 1, 2000, do_normalization = True, verbose = verbose)\n \n X_train2, y_train, X_test2 = ngrams(data, labels, ntrain, 2, 2, 4000, do_normalization = True, verbose = verbose)\n X_train3, y_train, X_test3 = ngrams(data, labels, ntrain, 3, 3, 100, do_normalization = True, verbose = verbose) \n X_train4, y_train, X_test4 = ngrams(data, labels, ntrain, 4, 4, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) \n X_train5, y_train, X_test5 = ngrams(data, labels, ntrain, 5, 5, 1000, do_normalization = True, verbose = verbose, analyzer_char = True) \n X_train6, y_train, X_test6 = ngrams(data, labels, ntrain, 3, 3, 2000, do_normalization = True, verbose = verbose, analyzer_char = True) \n\n X_train7, y_train, X_test7 = specialCases(data, labels, ntrain, verbose = verbose)\n X_train8, y_train, X_test8 = skipGrams(data, labels, ntrain, verbose = verbose)\n\n X_tn = sp.hstack([X_train1, X_train2, X_train3, X_train4, X_train5, X_train6, X_train7, X_train8])\n X_tt = sp.hstack([X_test1, X_test2, X_test3, X_test4, X_test5, X_test6, X_test7, X_test8])\n \n if verbose:\n print \"######## Total time for feature extraction: %fs\" % (time() - t0), X_tn.shape, X_tt.shape\n \n predictions = runClassifiers(X_tn, labels, X_tt)\n \n write_submission(predictions, 
PREDICTION_FILE) \n print \"Predictions written to:\", PREDICTION_FILE\n\"\"\"\n\nrun()\n#some code for n grams (use tdifvectorizer)\n\n\n\n\n\n", "step-ids": [ 7, 8, 9, 10, 12 ] }
[ 7, 8, 9, 10, 12 ]
"Unit tests for reverse URL lookup"

from django.core.urlresolvers import reverse_helper, NoReverseMatch
import re, unittest

# Each row: (URL pattern, expected reversed path -- or NoReverseMatch when
# reversing must fail -- positional args, keyword args).
test_data = (
    ('^places/(\d+)/$', 'places/3/', [3], {}),
    ('^places/(\d+)/$', 'places/3/', ['3'], {}),
    ('^places/(\d+)/$', NoReverseMatch, ['a'], {}),
    ('^places/(\d+)/$', NoReverseMatch, [], {}),
    ('^places/(?P<id>\d+)/$', 'places/3/', [], {'id': 3}),
    ('^people/(?P<name>\w+)/$', 'people/adrian/', ['adrian'], {}),
    ('^people/(?P<name>\w+)/$', 'people/adrian/', [], {'name': 'adrian'}),
    ('^people/(?P<name>\w+)/$', NoReverseMatch, ['name with spaces'], {}),
    ('^people/(?P<name>\w+)/$', NoReverseMatch, [], {'name': 'name with spaces'}),
    ('^people/(?P<name>\w+)/$', NoReverseMatch, [], {}),
    ('^hardcoded/$', 'hardcoded/', [], {}),
    ('^hardcoded/$', 'hardcoded/', ['any arg'], {}),
    ('^hardcoded/$', 'hardcoded/', [], {'kwarg': 'foo'}),
    ('^people/(?P<state>\w\w)/(?P<name>\w+)/$', 'people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
    ('^people/(?P<state>\w\w)/(?P<name>\d)/$', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
    ('^people/(?P<state>\w\w)/(?P<name>\w+)/$', NoReverseMatch, [], {'state': 'il'}),
    ('^people/(?P<state>\w\w)/(?P<name>\w+)/$', NoReverseMatch, [], {'name': 'adrian'}),
    ('^people/(?P<state>\w\w)/(\w+)/$', NoReverseMatch, ['il'], {'name': 'adrian'}),
    ('^people/(?P<state>\w\w)/(\w+)/$', 'people/il/adrian/', ['adrian'], {'state': 'il'}),
)

class URLPatternReverse(unittest.TestCase):
    """Drives reverse_helper() over every (pattern, args, kwargs) row above."""

    def test_urlpattern_reverse(self):
        for regex, expected, args, kwargs in test_data:
            try:
                got = reverse_helper(re.compile(regex), *args, **kwargs)
            except NoReverseMatch:
                # Reversing failed: the row must have declared that outcome.
                # (The old `except NoReverseMatch, e:` form is Python-2-only
                # syntax and the bound name was never used.)
                self.assertEqual(expected, NoReverseMatch)
            else:
                self.assertEqual(got, expected)

if __name__ == "__main__":
    # The original called an undefined run_tests(1); use the standard
    # unittest entry point instead.
    unittest.main()
normal
{ "blob_id": "b7ccb41c43a0db6f1bf9e6ba5cef1b9b1417e297", "index": 633, "step-1": "\"Unit tests for reverse URL lookup\"\n\nfrom django.core.urlresolvers import reverse_helper, NoReverseMatch\nimport re, unittest\n\ntest_data = (\n ('^places/(\\d+)/$', 'places/3/', [3], {}),\n ('^places/(\\d+)/$', 'places/3/', ['3'], {}),\n ('^places/(\\d+)/$', NoReverseMatch, ['a'], {}),\n ('^places/(\\d+)/$', NoReverseMatch, [], {}),\n ('^places/(?P<id>\\d+)/$', 'places/3/', [], {'id': 3}),\n ('^people/(?P<name>\\w+)/$', 'people/adrian/', ['adrian'], {}),\n ('^people/(?P<name>\\w+)/$', 'people/adrian/', [], {'name': 'adrian'}),\n ('^people/(?P<name>\\w+)/$', NoReverseMatch, ['name with spaces'], {}),\n ('^people/(?P<name>\\w+)/$', NoReverseMatch, [], {'name': 'name with spaces'}),\n ('^people/(?P<name>\\w+)/$', NoReverseMatch, [], {}),\n ('^hardcoded/$', 'hardcoded/', [], {}),\n ('^hardcoded/$', 'hardcoded/', ['any arg'], {}),\n ('^hardcoded/$', 'hardcoded/', [], {'kwarg': 'foo'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\w+)/$', 'people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\d)/$', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\w+)/$', NoReverseMatch, [], {'state': 'il'}),\n ('^people/(?P<state>\\w\\w)/(?P<name>\\w+)/$', NoReverseMatch, [], {'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(\\w+)/$', NoReverseMatch, ['il'], {'name': 'adrian'}),\n ('^people/(?P<state>\\w\\w)/(\\w+)/$', 'people/il/adrian/', ['adrian'], {'state': 'il'}),\n)\n\nclass URLPatternReverse(unittest.TestCase):\n def test_urlpattern_reverse(self):\n for regex, expected, args, kwargs in test_data:\n try:\n got = reverse_helper(re.compile(regex), *args, **kwargs)\n except NoReverseMatch, e:\n self.assertEqual(expected, NoReverseMatch)\n else:\n self.assertEquals(got, expected)\n\nif __name__ == \"__main__\":\n run_tests(1)\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 
] }
[ 0 ]
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 Opensource ICT Solutions B.V.
# https://oicts.com
#
# version: 1.0.0
# date: 06-11-2021
#
# Force a "check now" for every pollable item on one Zabbix host by
# creating task.create entries through the Zabbix JSON-RPC API.
# Usage: <script> <hostname>

import requests
import json
import sys

url = 'http://<URL>/zabbix/api_jsonrpc.php?'
token = '<TOKEN>'

headers = {'Content-Type': 'application/json'}

# Fail early with a usage message instead of an IndexError when the
# hostname argument is missing.
if len(sys.argv) < 2:
    sys.exit('usage: %s <hostname>' % sys.argv[0])
hostname = sys.argv[1]


def main():
    """Resolve the host, collect its item ids and queue re-checks."""
    hostid = hostid_get(token)
    itemid_array = itemid_get(hostid, token)
    update(itemid_array, token)


def hostid_get(token):
    """Return the Zabbix host id for ``hostname``.

    Exits with a clear error message when the host does not exist,
    instead of raising IndexError on the empty ``result`` list.
    """
    payload = {
        'jsonrpc': '2.0',
        'method': 'host.get',
        'params': {
            'output': ['hostid'],
            'filter': {'host': hostname},
        },
        'auth': token,
        'id': 1,
    }

    request = requests.post(url, data=json.dumps(payload), headers=headers)
    data = request.json()

    result = data.get('result', [])
    if not result:
        sys.exit("host '%s' not found in Zabbix" % hostname)
    return result[0]['hostid']


def itemid_get(hostid, token):
    """Return the ids of all pollable items configured on the host.

    The ``type`` filter lists Zabbix item-type codes that can be
    re-checked on demand -- NOTE(review): verify the code list against
    the Zabbix version in use.
    """
    payload = {
        'jsonrpc': '2.0',
        'method': 'item.get',
        'params': {
            'output': 'itemid',
            'filter': {
                'host': hostname,
                'type': ('0', '1', '3', '5', '8', '9', '10', '11',
                         '12', '13', '14', '15', '16', '19', '20', '21'),
            },
        },
        'auth': token,
        'id': 1,
    }

    request = requests.post(url, data=json.dumps(payload), headers=headers)
    data = request.json()

    itemid_array = []
    for itemid in data.get('result', []):
        itemid_array.append(str(itemid['itemid']))
    return itemid_array


def update(itemid_array, token):
    """Create one 'check now' task (type 6) per item id and print the reply."""
    payload = {
        'jsonrpc': '2.0',
        'method': 'task.create',
        'params': [],
        'auth': token,
        'id': 1,
    }
    for itemid in itemid_array:
        request = {}
        request['type'] = '6'
        request['request'] = {'itemid': itemid}
        payload['params'].append(request)

    request = requests.post(url, data=json.dumps(payload), headers=headers)
    data = request.json()
    json_string = json.dumps(data)

    print(json_string)


if __name__ == '__main__':
    # Call to main
    main()
normal
{ "blob_id": "18d7c486b9070a1c607ba2ba5876309246013182", "index": 4651, "step-1": "<mask token>\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, token)\n\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, 
token)\n\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\nurl = 'http://<URL>/zabbix/api_jsonrpc.php?'\ntoken = '<TOKEN>'\nheaders = {'Content-Type': 'application/json'}\nhostname = sys.argv[1]\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, token)\n\n\ndef hostid_get(token):\n payload = 
{}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import requests\nimport json\nimport sys\nurl = 'http://<URL>/zabbix/api_jsonrpc.php?'\ntoken = '<TOKEN>'\nheaders = {'Content-Type': 'application/json'}\nhostname = sys.argv[1]\n\n\ndef main():\n hostid = hostid_get(token)\n itemid_array = itemid_get(hostid, token)\n update(itemid_array, token)\n\n\ndef hostid_get(token):\n payload = {}\n 
payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n hostid = data['result'][0]['hostid']\n return hostid\n\n\ndef itemid_get(hostid, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = ('0', '1', '3', '5', '8', '9',\n '10', '11', '12', '13', '14', '15', '16', '19', '20', '21')\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\n\ndef update(itemid_array, token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n print(json_string)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# Copyright 2021 Opensource ICT Solutions B.V.\n# https://oicts.com\n#\n#version: 1.0.0\n#date: 06-11-2021\n\n\nimport requests\nimport json\nimport sys\n\nurl = 'http://<URL>/zabbix/api_jsonrpc.php?'\ntoken = '<TOKEN>'\n\nheaders = {'Content-Type': 'application/json'}\n\nhostname = sys.argv[1]\n\ndef main():\n 
hostid = hostid_get(token)\n itemid_array = itemid_get(hostid,token)\n update(itemid_array,token)\n\ndef hostid_get(token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'host.get'\n payload['params'] = {}\n payload['params']['output'] = ['hostid']\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['auth'] = token\n payload['id'] = 1\n\n\n #Doing the request\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n\n hostid = data[\"result\"][0][\"hostid\"]\n return hostid\n\ndef itemid_get(hostid,token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'item.get'\n payload['params'] = {}\n payload['params']['output'] = 'itemid'\n payload['params']['filter'] = {}\n payload['params']['filter']['host'] = hostname\n payload['params']['filter']['type'] = \"0\", \"1\", \"3\", \"5\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\", \"19\", \"20\", \"21\"\n payload['auth'] = token\n payload['id'] = 1\n\n# print(json.dumps(payload))\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n\n# print(data)\n\n itemid_array = []\n for itemid in data['result']:\n itemid_array.append(str(itemid['itemid']))\n return itemid_array\n\ndef update(itemid_array,token):\n payload = {}\n payload['jsonrpc'] = '2.0'\n payload['method'] = 'task.create'\n payload['params'] = []\n for itemid in itemid_array:\n request = {}\n request['type'] = '6'\n request['request'] = {}\n request['request']['itemid'] = itemid\n payload['params'].append(request)\n payload['auth'] = token\n payload['id'] = 1\n\n #print(\"payload = \" + json.dumps(payload))\n request = requests.post(url, data=json.dumps(payload), headers=headers)\n data = request.json()\n json_string = json.dumps(data)\n\n print(json_string)\n\nif __name__ == '__main__':\n # Call to main\n main()\n", "step-ids": [ 4, 5, 6, 7, 8 ] }
[ 4, 5, 6, 7, 8 ]
"""Unit tests for the minimum-length check in validate_pw_complexity."""
import unittest

from validate_pw_complexity import *

class Test_PW_Functions(unittest.TestCase):
    """Exercises validate_pw_long() around the minimum-length boundary."""

    def test_pw_not_long_enough_min(self):
        # 4 characters: below any plausible minimum, so rejection is expected.
        sample_pass ="abcd"
        expected_result = False

        result = validate_pw_long(sample_pass)
        self.assertEqual(expected_result, result)

    def test_pw_just_long_enough_min(self):
        # NOTE(review): the name says "just long enough" yet the expected
        # result is False -- this looks like a copy-paste from the test
        # above. Confirm validate_pw_long()'s minimum length and flip to
        # True if an 8-character password should pass.
        sample_pass = "abcdadca"
        expected_result = False

        result = validate_pw_long(sample_pass)
        self.assertEqual(expected_result, result)

    def test_pw_long_enough_min(self):
        # NOTE(review): same suspicion as above -- an 11-character password
        # named "long enough" is expected to fail; verify against
        # validate_pw_long() and update expected_result if needed.
        sample_pass = "abcdadcaabc"
        expected_result = False

        result = validate_pw_long(sample_pass)
        self.assertEqual(expected_result, result)
normal
{ "blob_id": "dc7d75bf43f1ba55673a43f863dd08e99a1c0e0f", "index": 4820, "step-1": "<mask token>\n\n\nclass Test_PW_Functions(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n", "step-2": "<mask token>\n\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass = 'abcd'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n <mask token>\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n", "step-3": "<mask token>\n\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass = 'abcd'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_just_long_enough_min(self):\n sample_pass = 'abcdadca'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n", "step-4": "import unittest\nfrom validate_pw_complexity import *\n\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass = 'abcd'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_just_long_enough_min(self):\n sample_pass = 'abcdadca'\n expected_result = False\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_long_enough_min(self):\n sample_pass = 'abcdadcaabc'\n expected_result = False\n 
result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n", "step-5": "import unittest\n\nfrom validate_pw_complexity import *\n\nclass Test_PW_Functions(unittest.TestCase):\n\n def test_pw_not_long_enough_min(self):\n sample_pass =\"abcd\"\n expected_result = False\n\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_just_long_enough_min(self):\n sample_pass = \"abcdadca\"\n expected_result = False\n\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n\n def test_pw_long_enough_min(self):\n sample_pass = \"abcdadcaabc\"\n expected_result = False\n\n result = validate_pw_long(sample_pass)\n self.assertEqual(expected_result, result)\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from django.shortcuts import render class Person(object): def __init__(self,username): self.username = username def index(request): # p = Person("张三") # context = { # 'person': p # } # context = { # 'person': { # 'username':'zhiliao', # } # } # person.keys() context = { 'persons': ( '鲁班一号', '程咬金', '阿珂' ) } return render(request,'index.html',context=context)
normal
{ "blob_id": "6d2bc28e7742f1063a04ae96fc195515ad70598b", "index": 5666, "step-1": "<mask token>\n\n\nclass Person(object):\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Person(object):\n\n def __init__(self, username):\n self.username = username\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Person(object):\n\n def __init__(self, username):\n self.username = username\n\n\ndef index(request):\n context = {'persons': ('鲁班一号', '程咬金', '阿珂')}\n return render(request, 'index.html', context=context)\n", "step-4": "from django.shortcuts import render\n\n\nclass Person(object):\n\n def __init__(self, username):\n self.username = username\n\n\ndef index(request):\n context = {'persons': ('鲁班一号', '程咬金', '阿珂')}\n return render(request, 'index.html', context=context)\n", "step-5": "from django.shortcuts import render\n\nclass Person(object):\n def __init__(self,username):\n self.username = username\n\ndef index(request):\n # p = Person(\"张三\")\n # context = {\n # 'person': p\n # }\n # context = {\n # 'person': {\n # 'username':'zhiliao',\n # }\n # }\n # person.keys()\n context = {\n 'persons': (\n '鲁班一号',\n '程咬金',\n '阿珂'\n )\n }\n return render(request,'index.html',context=context)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
"""Note: AWS Glue split from spark since it requires different test dependencies.""" from tests.integration.backend_dependencies import BackendDependencies from tests.integration.integration_test_fixture import IntegrationTestFixture aws_glue_integration_tests = [] deployment_patterns = [ # TODO: The AWS_GLUE dependency is only being marked and not run at this time. IntegrationTestFixture( name="how_to_use_great_expectations_in_aws_glue", user_flow_script="tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py", backend_dependencies=[ BackendDependencies.SPARK, BackendDependencies.AWS, BackendDependencies.AWS_GLUE, ], ), ] aws_glue_integration_tests += deployment_patterns
normal
{ "blob_id": "e288403cb310bb7241b25e74d1b5bcc63967128c", "index": 1031, "step-1": "<mask token>\n", "step-2": "<mask token>\naws_glue_integration_tests += deployment_patterns\n", "step-3": "<mask token>\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n", "step-4": "<mask token>\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\naws_glue_integration_tests = []\ndeployment_patterns = [IntegrationTestFixture(name=\n 'how_to_use_great_expectations_in_aws_glue', user_flow_script=\n 'tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py'\n , backend_dependencies=[BackendDependencies.SPARK, BackendDependencies.\n AWS, BackendDependencies.AWS_GLUE])]\naws_glue_integration_tests += deployment_patterns\n", "step-5": "\"\"\"Note: AWS Glue split from spark since it requires different test dependencies.\"\"\"\nfrom tests.integration.backend_dependencies import BackendDependencies\nfrom tests.integration.integration_test_fixture import IntegrationTestFixture\n\naws_glue_integration_tests = []\n\ndeployment_patterns = [\n # TODO: The AWS_GLUE dependency is only being marked and not run at this time.\n IntegrationTestFixture(\n name=\"how_to_use_great_expectations_in_aws_glue\",\n user_flow_script=\"tests/integration/docusaurus/deployment_patterns/aws_glue_deployment_patterns.py\",\n backend_dependencies=[\n BackendDependencies.SPARK,\n BackendDependencies.AWS,\n BackendDependencies.AWS_GLUE,\n ],\n ),\n]\n\naws_glue_integration_tests += deployment_patterns\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
def K_Wilson(w, Tr, Pr): # Inserting necessary libraries import numpy as np # Calculating K-value using Wilson correlation K_value_Output = (1 / Pr) * np.exp(5.37 * (1 + w) * (1 - 1 / Tr)) # Returning output value return K_value_Output
normal
{ "blob_id": "0b42f458097d11d66160bcb8e706ccb9b5c4682a", "index": 5744, "step-1": "<mask token>\n", "step-2": "def K_Wilson(w, Tr, Pr):\n import numpy as np\n K_value_Output = 1 / Pr * np.exp(5.37 * (1 + w) * (1 - 1 / Tr))\n return K_value_Output\n", "step-3": "def K_Wilson(w, Tr, Pr):\r\n \r\n # Inserting necessary libraries\r\n import numpy as np\r\n \r\n # Calculating K-value using Wilson correlation\r\n K_value_Output = (1 / Pr) * np.exp(5.37 * (1 + w) * (1 - 1 / Tr))\r\n \r\n # Returning output value\r\n return K_value_Output", "step-4": null, "step-5": null, "step-ids": [ 0, 1, 2 ] }
[ 0, 1, 2 ]
from flask.ext.restful import Resource, abort from flask_login import current_user, login_required from peewee import DoesNotExist from redash.authentication.org_resolving import current_org from redash.tasks import record_event class BaseResource(Resource): decorators = [login_required] def __init__(self, *args, **kwargs): super(BaseResource, self).__init__(*args, **kwargs) self._user = None def dispatch_request(self, *args, **kwargs): kwargs.pop('org_slug', None) return super(BaseResource, self).dispatch_request(*args, **kwargs) @property def current_user(self): return current_user._get_current_object() @property def current_org(self): return current_org._get_current_object() def record_event(self, options): options.update({ 'user_id': self.current_user.id, 'org_id': self.current_org.id }) record_event.delay(options) def require_fields(req, fields): for f in fields: if f not in req: abort(400) def get_object_or_404(fn, *args, **kwargs): try: return fn(*args, **kwargs) except DoesNotExist: abort(404)
normal
{ "blob_id": "71cdddfdd7c1327a8a77808dbdd0ff98d827231f", "index": 945, "step-1": "<mask token>\n\n\nclass BaseResource(Resource):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n <mask token>\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass BaseResource(Resource):\n <mask token>\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n", 
"step-4": "<mask token>\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<mask token>\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n", "step-5": "from flask.ext.restful import Resource, abort\nfrom flask_login import current_user, login_required\nfrom peewee import DoesNotExist\n\nfrom redash.authentication.org_resolving import current_org\nfrom redash.tasks import record_event\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({\n 'user_id': self.current_user.id,\n 'org_id': self.current_org.id\n })\n\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n", "step-ids": [ 5, 6, 7, 8, 11 ] }
[ 5, 6, 7, 8, 11 ]
#!/usr/bin/env python #**************************************************************************** # fieldformat.py, provides non-GUI base classes for field formating # # TreeLine, an information storage program # Copyright (C) 2006, Douglas W. Bell # # This is free software; you can redistribute it and/or modify it under the # terms of the GNU General Public License, either Version 2 or any later # version. This program is distributed in the hope that it will be useful, # but WITTHOUT ANY WARRANTY. See the included LICENSE file for details. #**************************************************************************** import re from xml.sax.saxutils import escape, unescape from gennumber import GenNumber, GenNumberError from gendate import GenDate, GenDateError from gentime import GenTime, GenTimeError from genboolean import GenBoolean, GenBooleanError import treedoc import globalref _errorStr = '#####' def xslEscape(text): """Encapsulate all literal text in <xsl:text> elements and transform/escape some non-XML entities. 
For the moment, only &nbsp; is supported""" nonTagRe = re.compile(r'(.*?)(<.*?>)|(.*)') escDict = {'&amp;nbsp;': '&#xa0;'} # escape function does '&' first def esc(matchObj): """Return escaped replacement text""" if matchObj.group(1) == None: # no tags found return u'<xsl:text>%s</xsl:text>' % \ escape(matchObj.group(3), escDict) if matchObj.group(1): # leading text and tag return u'<xsl:text>%s</xsl:text>%s' % \ (escape(matchObj.group(1), escDict), matchObj.group(2)) return matchObj.group(2) # tag only return nonTagRe.sub(esc, text) class TextFormat(object): """Holds format info for a normal text field""" typeName = 'Text' sortSequence = 20 stripTagRe = re.compile('<.*?>') defaultNumLines = 1 #field format edit options: defaultFormat = '' formatMenuList = [] htmlOption = True hasEditChoices = False autoAddChoices = False hasFileBrowse = False allowAltLinkText = False def __init__(self, name, attrs={}): """Any prefix, suffix, html info in attrs dict""" self.name = name self.enName = '' # used only by fileFormat field for i18n self.format = attrs.get(u'format', self.defaultFormat) self.prefix = attrs.get(u'prefix', '') self.suffix = attrs.get(u'suffix', '') # defaults to no html (line breaks preserved) self.html = attrs.get(u'html', '').startswith('y') and True or False self.isRequired = attrs.get(u'required', '').startswith('y') and \ True or False self.hidden = attrs.get(u'hidden', '').startswith('y') and \ True or False try: self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))) except ValueError: self.numLines = 1 self.initDefault = attrs.get(u'init', '') self.linkAltField = attrs.get(u'linkalt', '') self.parentLevel = 0 self.useFileInfo = False self.showInDialog = True self.initFormat() def initFormat(self): """Called by base init, after class change or format text change""" pass def duplicateSettings(self, otherField): """Assign other field's parameters to this field""" self.name = otherField.name self.enName = otherField.enName self.format = 
otherField.format self.prefix = otherField.prefix self.suffix = otherField.suffix self.html = otherField.html self.isRequired = otherField.isRequired self.hidden = otherField.hidden self.numLines = otherField.numLines self.initDefault = otherField.initDefault self.linkAltField = otherField.linkAltField self.parentLevel = otherField.parentLevel self.useFileInfo = otherField.useFileInfo self.showInDialog = otherField.showInDialog def changeType(self, newType): """Change this field's type to newType with default format""" self.__class__ = globals()[newType + 'Format'] self.format = self.defaultFormat self.initFormat() def englishName(self): """Returns English name if assigned, o/w name""" if self.enName: return self.enName return self.name def sepName(self, englishOnly=False): """Return name enclosed with {* *} separators""" name = englishOnly and self.enName or self.name if not self.useFileInfo: return u'{*%s*}' % name return u'{*!%s*}' % name def labelName(self): """Return name used for labels - add * for required fields""" if self.isRequired: return '%s*' % self.name return self.name def writeXml(self): """Return text for xml attributes""" text = u' type="%s"' % self.typeName if self.format: text += u' format="%s"' % escape(self.format, treedoc.escDict) if self.prefix: text += u' prefix="%s"' % escape(self.prefix, treedoc.escDict) if self.suffix: text += u' suffix="%s"' % escape(self.suffix, treedoc.escDict) if self.html: text += u' html="y"' if self.isRequired: text += u' required="y"' if self.hidden: text += u' hidden="y"' if self.numLines > 1: text += u' lines="%d"' % self.numLines if self.initDefault: text += u' init="%s"' % escape(self.initDefault, treedoc.escDict) if self.linkAltField: text += u' linkalt="%s"' % escape(self.linkAltField, treedoc.escDict) return text def outputText(self, item, titleMode, internal=False): """Return formatted text for this field""" if self.useFileInfo: item = globalref.docRef.fileInfoItem storedText = item.data.get(self.name, 
'') if storedText: return self.formatOutput(storedText, titleMode, internal) return '' def removeMarkup(self, text): """Remove HTML Markup and unescape entities""" text = TextFormat.stripTagRe.sub('', text) return unescape(text) def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" prefix = self.prefix suffix = self.suffix if titleMode: if self.html: storedText = self.removeMarkup(storedText) if globalref.docRef.formHtml: prefix = self.removeMarkup(prefix) suffix = self.removeMarkup(suffix) else: if not self.html: storedText = escape(storedText).replace('\n', '<br />') if not globalref.docRef.formHtml: prefix = escape(prefix) suffix = escape(suffix) return u'%s%s%s' % (prefix, storedText, suffix) def editText(self, item): """Return tuple of this field's text in edit format and bool validity, using edit format option""" storedText = item.data.get(self.name, '') result = self.formatEditText(storedText) if self.isRequired and not result[0]: return (result[0], False) return result def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using edit format option""" return (storedText, True) def storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using edit format option""" return (editText, editText or not self.isRequired) def getInitDefault(self): """Return initial stored value for new nodes""" return self.initDefault def setInitDefault(self, editText): """Set initial value from editor version using edit format option""" self.initDefault = self.storedText(editText)[0] def getEditInitDefault(self): """Return initial value in edit format, found in edit format option""" return self.formatEditText(self.initDefault)[0] def initDefaultChoices(self): """Return a list of choices for setting the init default""" return [] def sortValue(self, data): """Return value to be compared for sorting and conditionals""" storedText = 
data.get(self.name, '') return storedText.lower() def adjustedCompareValue(self, value): """Return conditional comparison value with real-time adjustments, used for date and time types' 'now' value""" return value def xslText(self): """Return what we need to write into an XSL file for this type""" return u'<xsl:if test="normalize-space(./%s)">%s'\ '<xsl:value-of select="./%s"/>%s</xsl:if>' % \ (self.name, xslEscape(self.prefix), self.name, xslEscape(self.suffix)) def xslTestText(self): """Return XSL file test for data existance""" return u'normalize-space(./%s)' % self.name class LongTextFormat(TextFormat): """Holds format info for a long text field - Obsolete - kept for compatability with old files""" # typeName = 'LongText' defaultNumLines = 7 def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) class NumberFormat(TextFormat): """Holds format info for a number field""" typeName = 'Number' sortSequence = 10 #field format edit options: defaultFormat = u'#.##' formatMenuList = [(u'%s\t%s' % (_('Optional Digit'), '#'), '#'), (u'%s\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\t%s' % (_('Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\t%s' % (_('Decimal Point'), '.'), '.'), (u'%s\t%s' % (_('Decimal Comma'), ','), ','), None, (u'%s\t%s' % (_('Comma Separator'), '\,'), '\,'), (u'%s\t%s' % (_('Dot Separator'), '\.'), '\.'), (u'%s\t%s' % (_('Space Separator (internal)'), _('<space>')), ' '), None, (u'%s\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\t%s' % (_('Required Sign'), '+'), '+'), None, (u'%s\t%s' % (_('Exponent (capital)'), 'E'), 'E'), (u'%s\t%s' % (_('Exponent (small)'), 'e'), 'e')] def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" try: text = 
GenNumber(storedText).numStr(self.format) except GenNumberError: text = _errorStr return TextFormat.formatOutput(self, text, titleMode, internal) def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using self.format""" try: return (GenNumber(storedText).numStr(self.format), True) except GenNumberError: return (storedText, not storedText) def storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using self.format""" try: return (repr(GenNumber().setFromStr(editText, self.format)), True) except GenNumberError: return (editText, not editText and not self.isRequired) def sortValue(self, data): """Return value to be compared for sorting and conditionals""" storedText = data.get(self.name, '') try: return GenNumber(storedText).num except GenNumberError: return '' class ChoiceFormat(TextFormat): """Holds format info for a field with one of several text options""" typeName = 'Choice' sortSequence = 20 editSep = '/' #field format edit options: defaultFormat = '1/2/3/4' formatMenuList = [(u'%s\t%s' % (_('Separator'), '/'), '/'), None, (u'%s\t%s' % (_('"/" Character'), '//'), '//'), None, (u'%s\t%s' % (_('Example'), '1/2/3/4'), '1/2/3/4')] hasEditChoices = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) def initFormat(self): """Called by base init, after class change or format text change""" self.formatList = self.splitText(self.format) def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" if storedText not in self.formatList: storedText = _errorStr return TextFormat.formatOutput(self, storedText, titleMode, internal) def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using edit format option""" if storedText in self.formatList: return (storedText, True) return (storedText, not 
storedText) def storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using edit format option""" if editText in self.formatList: return (editText, True) return (editText, not editText and not self.isRequired) def getEditChoices(self, currentText=''): """Return list of choices for combo box, each a tuple of edit text and any annotation text""" return [(text, '') for text in self.formatList] def initDefaultChoices(self): """Return a list of choices for setting the init default""" return [text for text in self.formatList] def splitText(self, textStr): """Split textStr using editSep, double sep's become char""" return [text.strip().replace('\0', self.editSep) for text in textStr.replace(self.editSep * 2, '\0'). split(self.editSep)] class CombinationFormat(ChoiceFormat): """Holds format info for a field of combinations of text options""" typeName = 'Combination' outputSepList = (',', ';', ':', '|', '/', '\\', '~') def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" ChoiceFormat.__init__(self, name, attrs) def initFormat(self): """Called by base init, after class change or format text change""" ChoiceFormat.initFormat(self) fullFormat = ''.join(self.formatList) try: self.sep = [sep for sep in CombinationFormat.outputSepList if sep not in fullFormat][0] + ' ' except IndexError: self.sep = CombinationFormat.outputSepList[0] + ' ' def sortedChoices(self, inText): """Return tuple of choices from inText sorted like format and True if all splits are valid and included""" choices = self.splitText(inText) sortedChoices = [text for text in self.formatList if text in choices] if len(choices) == len(sortedChoices): return (sortedChoices, True) else: return (sortedChoices, False) def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" choices, valid = self.sortedChoices(storedText) if valid: result = self.sep.join(choices) else: 
result = _errorStr return TextFormat.formatOutput(self, result, titleMode, internal) def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using edit format option""" for choice in self.splitText(storedText): if choice not in self.formatList: return (storedText, not storedText) return (storedText, True) def storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using edit format option""" choices, valid = self.sortedChoices(editText) if valid: return (self.editSep.join(choices), True) else: return (editText, not editText and not self.isRequired) def getEditChoices(self, currentText=''): """Return list of choices for combo box, each a tuple of edit text and any annotation text""" currentChoices, valid = self.sortedChoices(currentText) nonChoices = [text for text in self.formatList if text not in currentChoices] results = [] for choice in nonChoices: # menu entries to add a choice allChoices = currentChoices + [choice] allChoices = [text for text in self.formatList if text in allChoices] results.append((self.editSep.join(allChoices), '(%s %s)' % (_('add'), choice))) if currentChoices: results.append((None, None)) # separator for choice in currentChoices: # menu entries to remove a choice allChoices = currentChoices[:] allChoices.remove(choice) allChoices = [text for text in self.formatList if text in allChoices] results.append((self.editSep.join(allChoices), '(%s %s)' % (_('remove'), choice))) return results def initDefaultChoices(self): """Return a list of choices for setting the init default""" return [entry[0] for entry in self.getEditChoices()] class AutoChoiceFormat(ChoiceFormat): """Holds format info for a field with one of several text options""" typeName = 'AutoChoice' #field format edit options: defaultFormat = '' formatMenuList = () hasEditChoices = True autoAddChoices = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" 
TextFormat.__init__(self, name, attrs) def initFormat(self): """Called by base init, after class change or format text change""" self.formatList = [] def addChoice(self, choice, sort=False): """Add choice to edit menu list if not already there""" if choice and choice not in self.formatList: self.formatList.append(choice) if sort: self.sortChoices() def sortChoices(self): """Sort menu list choices""" self.formatList.sort() def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" return TextFormat.formatOutput(self, storedText, titleMode, internal) def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using edit format option""" return (storedText, True) def storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using edit format option""" if editText: return (editText, True) return (editText, not self.isRequired) class DateFormat(TextFormat): """Holds format info for a date field""" typeName = 'Date' sortSequence = 5 #field format edit options: defaultFormat = u'mmmm d, yyyy' dateStampStrings = ('Now', _('Now', 'date stamp setting')) formatMenuList = [(u'%s\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (u'%s\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\t%s' % (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\t%s' % (_('Month (2 digits)'), 'mm'), 'mm'), (u'%s\t%s' % (_('Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\t%s' % (_('Month Name'), 'mmmm'), 'mmmm'), None, (u'%s\t%s' % (_('Year (2 digits)'), 'yy'), 'yy'), (u'%s\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'), None, (u'%s\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\t%s' % (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\t%s' % (_('Weekday Name'), 'wwww'), 'wwww')] hasEditChoices = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) def formatOutput(self, 
storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" try: text = GenDate(storedText).dateStr(self.format) except GenDateError: text = _errorStr return TextFormat.formatOutput(self, text, titleMode, internal) def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using edit format option""" format = globalref.options.strData('EditDateFormat', True) try: return (GenDate(storedText).dateStr(format), True) except GenDateError: return (storedText, not storedText) def storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using edit format option""" format = globalref.options.strData('EditDateFormat', True) try: return (repr(GenDate().setFromStr(editText, format)), True) except GenDateError: return (editText, not editText and not self.isRequired) def getEditChoices(self, currentText=''): """Return list of choices for combo box, each a tuple of edit text and any annotation text""" format = globalref.options.strData('EditDateFormat', True) today = GenDate().dateStr(format) yesterday = (GenDate() - 1).dateStr(format) tomorrow = (GenDate() + 1).dateStr(format) return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _('yesterday')), (tomorrow, '(%s)' % _('tomorrow'))] def getInitDefault(self): """Return initial stored value for new nodes""" if self.initDefault in DateFormat.dateStampStrings: return GenDate().dateStr() return TextFormat.getInitDefault(self) def setInitDefault(self, editText): """Set initial value from editor version using edit format option""" if editText in DateFormat.dateStampStrings: self.initDefault = DateFormat.dateStampStrings[0] else: TextFormat.setInitDefault(self, editText) def getEditInitDefault(self): """Return initial value in edit format, found in edit format option""" if self.initDefault in DateFormat.dateStampStrings: return DateFormat.dateStampStrings[1] return TextFormat.getEditInitDefault(self) def 
initDefaultChoices(self): """Return a list of choices for setting the init default""" choices = [entry[0] for entry in self.getEditChoices()] choices.insert(0, DateFormat.dateStampStrings[1]) return choices def adjustedCompareValue(self, value): """Return conditional comparison value with real-time adjustments, used for date and time types' 'now' value""" if value.startswith('now'): return repr(GenDate()) return value class TimeFormat(TextFormat): """Holds format info for a time field""" typeName = 'Time' sortSequence = 6 #field format edit options: defaultFormat = u'h:MM:SS aa' timeStampStrings = ('Now', _('Now', 'time stamp setting')) formatMenuList = [(u'%s\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'), 'H'), (u'%s\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (u'%s\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (u'%s\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (u'%s\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\t%s' % (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\t%s' % (_('Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\t%s' % (_('Second (2 digits)'), 'SS'), 'SS'), (u'%s\t%s' % (_('Fractional Seconds'), 's'), 's'), None, (u'%s\t%s' % (_('AM/PM'), 'AA'), 'AA'), (u'%s\t%s' % (_('am/pm'), 'aa'),'aa')] hasEditChoices = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" try: text = GenTime(storedText).timeStr(self.format) except GenTimeError: text = _errorStr return TextFormat.formatOutput(self, text, titleMode, internal) def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using edit format option""" format = globalref.options.strData('EditTimeFormat', True) try: return (GenTime(storedText).timeStr(format), True) except GenTimeError: return (storedText, not storedText) def 
storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using edit format option""" try: return (repr(GenTime(editText)), True) except GenTimeError: return (editText, not editText and not self.isRequired) def getEditChoices(self, currentText=''): """Return list of choices for combo box, each a tuple of edit text and annotated text""" format = globalref.options.strData('EditTimeFormat', True) now = GenTime().timeStr(format) choices = [(now, '(%s)' % _('now'))] for hr in (6, 9, 12, 15, 18, 21, 0): time = GenTime((hr, 0)).timeStr(format) choices.append((time, '')) return choices def getInitDefault(self): """Return initial stored value for new nodes""" if self.initDefault in TimeFormat.timeStampStrings: return GenTime().timeStr() return TextFormat.getInitDefault(self) def setInitDefault(self, editText): """Set initial value from editor version using edit format option""" if editText in TimeFormat.timeStampStrings: self.initDefault = TimeFormat.timeStampStrings[0] else: TextFormat.setInitDefault(self, editText) def getEditInitDefault(self): """Return initial value in edit format, found in edit format option""" if self.initDefault in TimeFormat.timeStampStrings: return TimeFormat.timeStampStrings[1] return TextFormat.getEditInitDefault(self) def initDefaultChoices(self): """Return a list of choices for setting the init default""" choices = [entry[0] for entry in self.getEditChoices()] choices.insert(0, TimeFormat.timeStampStrings[1]) return choices def adjustedCompareValue(self, value): """Return conditional comparison value with real-time adjustments, used for date and time types' 'now' value""" if value.startswith('now'): return repr(GenTime()) return value class BooleanFormat(ChoiceFormat): """Holds format info for a bool field""" typeName = 'Boolean' sortSequence = 1 #field format edit options: defaultFormat = _('yes/no') formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _('T/F')), None, (_('yes/no'), 
_('yes/no')), (_('Y/N'), _('Y/N')), None, ('1/0', '1/0')] hasEditChoices = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" ChoiceFormat.__init__(self, name, attrs) def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped if not in titleMode""" if storedText not in self.formatList: try: storedText = GenBoolean(storedText).boolStr(self.format) except GenBooleanError: storedText = _errorStr return TextFormat.formatOutput(self, storedText, titleMode, internal) def formatEditText(self, storedText): """Return tuple of text in edit format and bool validity, using edit format option""" if storedText in self.formatList: return (storedText, True) try: return (GenBoolean(storedText).boolStr(self.format), True) except GenBooleanError: return (storedText, not storedText) def storedText(self, editText): """Return tuple of stored text from edited text and bool validity, using edit format option""" try: return (repr(GenBoolean(editText)), True) except GenBooleanError: if editText in self.formatList: return (editText, True) return (editText, not editText and not self.isRequired) def sortValue(self, data): """Return value to be compared for sorting and conditionals""" storedText = data.get(self.name, '') try: return repr(GenBoolean(storedText)) except GenBooleanError: return '' class UniqueIDFormat(TextFormat): """An unique ID automatically generated for new nodes""" typeName = 'UniqueID' sortSequence = 10 formatRe = re.compile('([^0-9]*)([0-9]+)(.*)') #field format edit options: defaultFormat = u'0001' formatMenuList = [(u'%s\t%s' % (_('Required Digit'), '0'), '0'), None, (u'%s\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\t%s' % (_('Prefix Example'), 'id0100'), 'id0100')] def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) def nextValue(self, increment=True): """Return the next value for a new 
node, increment format if increment is True""" try: prefix, numText, suffix = UniqueIDFormat.formatRe.\ match(self.format).groups() except AttributeError: self.format = UniqueIDFormat.defaultFormat return self.nextValue(increment) value = self.format if increment: pattern = u'%%s%%0.%dd%%s' % len(numText) num = int(numText) + 1 self.format = pattern % (prefix, num, suffix) return value def sortValue(self, data): """Return value to be compared for sorting and conditionals""" storedText = data.get(self.name, '') try: return int(UniqueIDFormat.formatRe.match(storedText).group(2)) except AttributeError: return 0 class URLFormat(TextFormat): """Holds format info for a field with a URL path""" typeName = 'URL' sortSequence = 8 htmlOption = False allowAltLinkText = True hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#') URLMethod = u'http://' def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) def initFormat(self): """Called by base init, after class change or format text change""" self.html = True def outputText(self, item, titleMode, internal=False): """Return formatted text for this field""" if self.useFileInfo: item = globalref.docRef.fileInfoItem altText = '' if self.linkAltField: field = item.nodeFormat().findField(self.linkAltField) if field: altText = field.outputText(item, titleMode, internal) storedText = item.data.get(self.name, '') if storedText: return self.formatOutput(storedText, titleMode, altText, internal) return '' def formatOutput(self, storedText, titleMode, altText='', internal=False): """Return formatted text, properly escaped and with a link reference if not in titleMode""" if titleMode: return TextFormat.formatOutput(self, storedText, titleMode, internal) paths = storedText.split('\n') results = [] for url in paths: path = url if not URLFormat.hasMethodRe.match(path): path = u'%s%s' % (self.URLMethod, path) path = u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict), 
altText or url) results.append(TextFormat.formatOutput(self, path, titleMode, internal)) return u'<br />'.join(results) def xslText(self): """Return what we need to write into an XSL file for this type""" return u'<xsl:for-each select = "./%s">%s<xsl:choose>'\ '<xsl:when test="contains(., \':\')"><a href="{.}">'\ '<xsl:value-of select="."/></a></xsl:when><xsl:otherwise>'\ '<a href="%s{.}"><xsl:value-of select="."/></a>'\ '</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \ (self.name, xslEscape(self.prefix), self.URLMethod, xslEscape(self.suffix)) class PathFormat(URLFormat): """Holds format info for a field with a local path""" typeName = 'Path' URLMethod = u'file:///' hasFileBrowse = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" URLFormat.__init__(self, name, attrs) class EmailFormat(URLFormat): """Holds format info for a field with a local path""" typeName = 'Email' URLMethod = u'mailto:' def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" URLFormat.__init__(self, name, attrs) class InternalLinkFormat(URLFormat): """Holds format info for a field with a local path""" typeName = 'InternalLink' URLMethod = u'#' def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" URLFormat.__init__(self, name, attrs) class ExecuteLinkFormat(URLFormat): """Holds format info for an executable field""" typeName = 'ExecuteLink' URLMethod = u'exec:' hasFileBrowse = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" URLFormat.__init__(self, name, attrs) def formatOutput(self, storedText, titleMode, altText='', internal=False): """Return formatted text, properly escaped and with a link reference if not in titleMode""" if titleMode or not internal: return TextFormat.formatOutput(self, storedText, titleMode, internal) paths = storedText.split('\n') results = [] for url in paths: # add prefix/suffix within the 
executable path: url = TextFormat.formatOutput(self, url, titleMode, internal) path = url if not URLFormat.hasMethodRe.match(path): path = u'%s%s' % (self.URLMethod, path) results.append(u'<a href="%s">%s</a>' % (escape(path, treedoc.escDict), altText or url)) return u'<br />'.join(results) def xslText(self): """Return what we need to write into an XSL file for this type""" return TextFormat.xslText(self) class PictureFormat(TextFormat): """Holds format info for a field with a link to a picture""" typeName = 'Picture' sortSequence = 8 htmlOption = False hasFileBrowse = True def __init__(self, name, attrs={}): """Any format, prefix, suffix, html info in attrs dict""" TextFormat.__init__(self, name, attrs) def initFormat(self): """Called by base init, after class change or format text change""" self.html = True def formatOutput(self, storedText, titleMode, internal=False): """Return formatted text, properly escaped and with a link to the picture if not in titleMode""" if titleMode: return TextFormat.formatOutput(self, storedText, titleMode, internal) paths = storedText.split('\n') results = ['<img src="%s">' % escape(url, treedoc.escDict) for url in paths] return u'<br />'.join(results) class ParentFormat(TextFormat): """Placeholder format for references to specific parents""" typeName = 'Parent' def __init__(self, name, parentLevel=1): TextFormat.__init__(self, name, {}) self.parentLevel = parentLevel def sepName(self, englishOnly=False): """Return name enclosed with {* *} separators""" name = englishOnly and self.enName or self.name return u'{*%s%s*}' % (self.parentLevel * '*', name) def outputText(self, item, titleMode, internal=False): """Return formatted text for this field""" for num in range(self.parentLevel): item = item.parent if not item: return '' field = item.nodeFormat().findField(self.name) if not field: return '' return field.outputText(item, titleMode, internal) def xslText(self): """Return what we need to write into an XSL file for this type""" 
return u'<xsl:value-of select="%s%s"/>' % (self.parentLevel * '../', self.name) def xslTestText(self): """Return XSL file test for data existance""" return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name) class AncestorFormat(TextFormat): """Placeholder format for references to any parent with data""" typeName = 'Ancestor' def __init__(self, name): TextFormat.__init__(self, name, {}) self.parentLevel = 1000 def sepName(self, englishOnly=False): """Return name enclosed with {*? *} separators""" name = englishOnly and self.enName or self.name return u'{*?%s*}' % (name) def outputText(self, item, titleMode, internal=False): """Return formatted text for this field""" field = None while not field: item = item.parent if item: field = item.nodeFormat().findField(self.name) else: return '' return field.outputText(item, titleMode, internal) def xslText(self): """Return what we need to write into an XSL file for this type""" return u'<xsl:value-of select="ancestor::*/%s"/>' % self.name def xslTestText(self): """Return XSL file test for data existance""" return u'normalize-space(ancestor::*/%s)' % self.name class ChildFormat(TextFormat): """Placeholder format for references to a sequence of child data""" typeName = 'Child' def __init__(self, name): TextFormat.__init__(self, name, {}) self.parentLevel = -1 def sepName(self, englishOnly=False): """Return name enclosed with {*? 
*} separators""" name = englishOnly and self.enName or self.name return u'{*&%s*}' % (name) def outputText(self, item, titleMode, internal=False): """Return formatted text for this field""" result = [] for child in item.childList: field = child.nodeFormat().findField(self.name) if field: text = field.outputText(child, titleMode, internal) if text: result.append(text) return globalref.docRef.childFieldSep.join(result) def xslText(self): """Return what we need to write into an XSL file for this type""" return u'<xsl:value-of select="child::*/%s"/>' % self.name def xslTestText(self): """Return XSL file test for data existance""" return u'normalize-space(child::*/%s)' % self.name class CountFormat(TextFormat): """Placeholder format for a count of children at the given level""" typeName = 'Count' def __init__(self, name, level): TextFormat.__init__(self, name, {}) self.parentLevel = -level def sepName(self, englishOnly=False): """Return name enclosed with {*? *} separators""" name = englishOnly and self.enName or self.name return u'{*#%s*}' % (name) def outputText(self, item, titleMode, internal=False): """Return formatted text for this field""" return repr(len(item.descendLevelList(-self.parentLevel)))
normal
{ "blob_id": "5e1398ed628917a42cc465e7cc2979601f0f4fbc", "index": 7865, "step-1": "<mask token>\n\n\nclass DateFormat(TextFormat):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return 
TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs 
dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return 
TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return 
editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, 
attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, 
attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n 
TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def 
sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n", "step-2": "<mask token>\n\n\nclass TextFormat(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = 
otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n <mask token>\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return 
unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return 
storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), '\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n 
\"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def 
formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in 
self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for 
setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month 
Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def 
getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 
'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, 
editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from 
edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = 
re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n 
typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a 
picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with 
data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n", "step-3": "<mask token>\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n 
self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return 
self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for 
setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), '\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, 
name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted 
text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, 
inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text 
in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % 
(_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n 
tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 
digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit 
format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return 
GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass 
URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % 
(self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write 
into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return 
u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n", "step-4": "<mask token>\n_errorStr = '#####'\n\n\ndef xslEscape(text):\n \"\"\"Encapsulate all literal text in <xsl:text> elements\n and transform/escape some non-XML entities.\n For the moment, only &nbsp; is supported\"\"\"\n nonTagRe = re.compile('(.*?)(<.*?>)|(.*)')\n escDict = {'&amp;nbsp;': '&#xa0;'}\n\n def esc(matchObj):\n \"\"\"Return escaped replacement text\"\"\"\n if matchObj.group(1) == None:\n return u'<xsl:text>%s</xsl:text>' % escape(matchObj.group(3),\n escDict)\n if matchObj.group(1):\n return u'<xsl:text>%s</xsl:text>%s' % (escape(matchObj.group(1),\n escDict), matchObj.group(2))\n return matchObj.group(2)\n return nonTagRe.sub(esc, text)\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = ''\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', '').startswith('y'\n ) and True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y'\n ) and True or False\n try:\n self.numLines = int(attrs.get(u'lines', repr(self.defaultNumLines))\n )\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = 
attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text = u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n 
text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField, treedoc.\n escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return result[0], False\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text 
from edited text and bool validity,\n using edit format option\"\"\"\n return editText, editText or not self.isRequired\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:if test=\"normalize-space(./%s)\">%s<xsl:value-of select=\"./%s\"/>%s</xsl:if>'\n % (self.name, xslEscape(self.prefix), self.name, xslEscape(\n self.suffix)))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n defaultNumLines = 7\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'), (\n u'%s\\t%s' % (_('Required 
Digit'), '0'), '0'), (u'%s\\t%s' % (_(\n 'Digit or Space (external)'), _('<space>')), ' '), None, (u'%s\\t%s' %\n (_('Decimal Point'), '.'), '.'), (u'%s\\t%s' % (_('Decimal Comma'),\n ','), ','), None, (u'%s\\t%s' % (_('Comma Separator'), '\\\\,'), '\\\\,'\n ), (u'%s\\t%s' % (_('Dot Separator'), '\\\\.'), '\\\\.'), (u'%s\\t%s' % (\n _('Space Separator (internal)'), _('<space>')), ' '), None, (\n u'%s\\t%s' % (_('Optional Sign'), '-'), '-'), (u'%s\\t%s' % (_(\n 'Required Sign'), '+'), '+'), None, (u'%s\\t%s' % (_(\n 'Exponent (capital)'), 'E'), 'E'), (u'%s\\t%s' % (_(\n 'Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return GenNumber(storedText).numStr(self.format), True\n except GenNumberError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return repr(GenNumber().setFromStr(editText, self.format)), True\n except GenNumberError:\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = 
'/'\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None, (\n u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None, (u'%s\\t%s' % (\n _('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\x00', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\x00').split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text 
options\"\"\"\n typeName = 'Combination'\n outputSepList = ',', ';', ':', '|', '/', '\\\\', '~'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList if \n sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return sortedChoices, True\n else:\n return sortedChoices, False\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return storedText, not storedText\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return self.editSep.join(choices), True\n else:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of 
edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList if text not in\n currentChoices]\n results = []\n for choice in nonChoices:\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'add'), choice)))\n if currentChoices:\n results.append((None, None))\n for choice in currentChoices:\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList if text in\n allChoices]\n results.append((self.editSep.join(allChoices), '(%s %s)' % (_(\n 'remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n \"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple 
of text in edit format and bool validity,\n using edit format option\"\"\"\n return storedText, True\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return editText, True\n return editText, not self.isRequired\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = 'Now', _('Now', 'date stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'), (\n u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'), None, (u'%s\\t%s' %\n (_('Month (1 or 2 digits)'), 'm'), 'm'), (u'%s\\t%s' % (_(\n 'Month (2 digits)'), 'mm'), 'mm'), (u'%s\\t%s' % (_(\n 'Month Abbreviation'), 'mmm'), 'mmm'), (u'%s\\t%s' % (_('Month Name'\n ), 'mmmm'), 'mmmm'), None, (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'\n ), 'yy'), (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None, (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'), (u'%s\\t%s' %\n (_('Weekday Abbreviation'), 'www'), 'www'), (u'%s\\t%s' % (_(\n 'Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return GenDate(storedText).dateStr(format), True\n except GenDateError:\n return storedText, not storedText\n\n def storedText(self, 
editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return repr(GenDate().setFromStr(editText, format)), True\n except GenDateError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')), (yesterday, '(%s)' % _(\n 'yesterday')), (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass 
TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = 'Now', _('Now', 'time stamp setting')\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'), (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'), (\n u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'), 'h'), (\n u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'), None, (\n u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'), (u'%s\\t%s' %\n (_('Minute (2 digits)'), 'MM'), 'MM'), None, (u'%s\\t%s' % (_(\n 'Second (1 or 2 digits)'), 'S'), 'S'), (u'%s\\t%s' % (_(\n 'Second (2 digits)'), 'SS'), 'SS'), (u'%s\\t%s' % (_(\n 'Fractional Seconds'), 's'), 's'), None, (u'%s\\t%s' % (_('AM/PM'),\n 'AA'), 'AA'), (u'%s\\t%s' % (_('am/pm'), 'aa'), 'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return GenTime(storedText).timeStr(format), True\n except GenTimeError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenTime(editText)), True\n except GenTimeError:\n return editText, not editText and not self.isRequired\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo 
box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')), (_('T/F'), _(\n 'T/F')), None, (_('yes/no'), _('yes/no')), (_('Y/N'), _('Y/N')),\n None, ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, 
name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return storedText, True\n try:\n return GenBoolean(storedText).boolStr(self.format), True\n except GenBooleanError:\n return storedText, not storedText\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return repr(GenBoolean(editText)), True\n except GenBooleanError:\n if editText in self.formatList:\n return editText, True\n return editText, not editText and not self.isRequired\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'), (u'%s\\t%s' %\n (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = 
UniqueIDFormat.formatRe.match(self.format\n ).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % 
(self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return (\n u'<xsl:for-each select = \"./%s\">%s<xsl:choose><xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\"><xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise><a href=\"%s{.}\"><xsl:value-of select=\".\"/></a></xsl:otherwise></xsl:choose>%s</xsl:for-each>'\n % (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix)))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, 
properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.\n escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = [('<img src=\"%s\">' % escape(url, treedoc.escDict)) for\n url in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def 
outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % name\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n", "step-5": "#!/usr/bin/env python\n\n#****************************************************************************\n# fieldformat.py, provides non-GUI base classes for field formating\n#\n# TreeLine, an information storage program\n# Copyright (C) 2006, Douglas W. Bell\n#\n# This is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License, either Version 2 or any later\n# version. This program is distributed in the hope that it will be useful,\n# but WITTHOUT ANY WARRANTY. 
See the included LICENSE file for details.\n#****************************************************************************\n\nimport re\nfrom xml.sax.saxutils import escape, unescape\nfrom gennumber import GenNumber, GenNumberError\nfrom gendate import GenDate, GenDateError\nfrom gentime import GenTime, GenTimeError\nfrom genboolean import GenBoolean, GenBooleanError\nimport treedoc\nimport globalref\n\n_errorStr = '#####'\n\n\ndef xslEscape(text):\n \"\"\"Encapsulate all literal text in <xsl:text> elements\n and transform/escape some non-XML entities.\n For the moment, only &nbsp; is supported\"\"\"\n nonTagRe = re.compile(r'(.*?)(<.*?>)|(.*)')\n escDict = {'&amp;nbsp;': '&#xa0;'} # escape function does '&' first\n def esc(matchObj):\n \"\"\"Return escaped replacement text\"\"\"\n if matchObj.group(1) == None: # no tags found\n return u'<xsl:text>%s</xsl:text>' % \\\n escape(matchObj.group(3), escDict)\n if matchObj.group(1): # leading text and tag\n return u'<xsl:text>%s</xsl:text>%s' % \\\n (escape(matchObj.group(1), escDict), matchObj.group(2))\n return matchObj.group(2) # tag only\n return nonTagRe.sub(esc, text)\n\n\nclass TextFormat(object):\n \"\"\"Holds format info for a normal text field\"\"\"\n typeName = 'Text'\n sortSequence = 20\n stripTagRe = re.compile('<.*?>')\n defaultNumLines = 1\n #field format edit options:\n defaultFormat = ''\n formatMenuList = []\n htmlOption = True\n hasEditChoices = False\n autoAddChoices = False\n hasFileBrowse = False\n allowAltLinkText = False\n\n def __init__(self, name, attrs={}):\n \"\"\"Any prefix, suffix, html info in attrs dict\"\"\"\n self.name = name\n self.enName = '' # used only by fileFormat field for i18n\n self.format = attrs.get(u'format', self.defaultFormat)\n self.prefix = attrs.get(u'prefix', '')\n self.suffix = attrs.get(u'suffix', '')\n # defaults to no html (line breaks preserved)\n self.html = attrs.get(u'html', '').startswith('y') and True or False\n self.isRequired = attrs.get(u'required', 
'').startswith('y') and \\\n True or False\n self.hidden = attrs.get(u'hidden', '').startswith('y') and \\\n True or False\n try:\n self.numLines = int(attrs.get(u'lines',\n repr(self.defaultNumLines)))\n except ValueError:\n self.numLines = 1\n self.initDefault = attrs.get(u'init', '')\n self.linkAltField = attrs.get(u'linkalt', '')\n self.parentLevel = 0\n self.useFileInfo = False\n self.showInDialog = True\n self.initFormat()\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n pass\n\n def duplicateSettings(self, otherField):\n \"\"\"Assign other field's parameters to this field\"\"\"\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog\n\n def changeType(self, newType):\n \"\"\"Change this field's type to newType with default format\"\"\"\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()\n\n def englishName(self):\n \"\"\"Returns English name if assigned, o/w name\"\"\"\n if self.enName:\n return self.enName\n return self.name\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n if not self.useFileInfo:\n return u'{*%s*}' % name\n return u'{*!%s*}' % name\n\n def labelName(self):\n \"\"\"Return name used for labels - add * for required fields\"\"\"\n if self.isRequired:\n return '%s*' % self.name\n return self.name\n\n def writeXml(self):\n \"\"\"Return text for xml attributes\"\"\"\n text 
= u' type=\"%s\"' % self.typeName\n if self.format:\n text += u' format=\"%s\"' % escape(self.format, treedoc.escDict)\n if self.prefix:\n text += u' prefix=\"%s\"' % escape(self.prefix, treedoc.escDict)\n if self.suffix:\n text += u' suffix=\"%s\"' % escape(self.suffix, treedoc.escDict)\n if self.html:\n text += u' html=\"y\"'\n if self.isRequired:\n text += u' required=\"y\"'\n if self.hidden:\n text += u' hidden=\"y\"'\n if self.numLines > 1:\n text += u' lines=\"%d\"' % self.numLines\n if self.initDefault:\n text += u' init=\"%s\"' % escape(self.initDefault, treedoc.escDict)\n if self.linkAltField:\n text += u' linkalt=\"%s\"' % escape(self.linkAltField,\n treedoc.escDict)\n return text\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''\n\n def removeMarkup(self, text):\n \"\"\"Remove HTML Markup and unescape entities\"\"\"\n text = TextFormat.stripTagRe.sub('', text)\n return unescape(text)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)\n\n def editText(self, item):\n \"\"\"Return tuple of this field's text in edit format and bool validity,\n using edit format option\"\"\"\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if 
self.isRequired and not result[0]:\n return (result[0], False)\n return result\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n return (editText, editText or not self.isRequired)\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n return self.initDefault\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n self.initDefault = self.storedText(editText)[0]\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n return self.formatEditText(self.initDefault)[0]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return []\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n return storedText.lower()\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n return value\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:if test=\"normalize-space(./%s)\">%s'\\\n '<xsl:value-of select=\"./%s\"/>%s</xsl:if>' % \\\n (self.name, xslEscape(self.prefix), self.name,\n xslEscape(self.suffix))\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(./%s)' % self.name\n\n\nclass LongTextFormat(TextFormat):\n \"\"\"Holds format info for a long text field - Obsolete -\n kept for compatability with old files\"\"\"\n # typeName = 'LongText'\n defaultNumLines = 7\n def __init__(self, name, attrs={}):\n \"\"\"Any 
format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n\nclass NumberFormat(TextFormat):\n \"\"\"Holds format info for a number field\"\"\"\n typeName = 'Number'\n sortSequence = 10\n #field format edit options:\n defaultFormat = u'#.##'\n formatMenuList = [(u'%s\\t%s' % (_('Optional Digit'), '#'), '#'),\n (u'%s\\t%s' % (_('Required Digit'), '0'), '0'),\n (u'%s\\t%s' % (_('Digit or Space (external)'),\n _('<space>')), ' '),\n None,\n (u'%s\\t%s' % (_('Decimal Point'), '.'), '.'),\n (u'%s\\t%s' % (_('Decimal Comma'), ','), ','),\n None,\n (u'%s\\t%s' % (_('Comma Separator'), '\\,'), '\\,'),\n (u'%s\\t%s' % (_('Dot Separator'), '\\.'), '\\.'),\n (u'%s\\t%s' % (_('Space Separator (internal)'),\n _('<space>')), ' '),\n None,\n (u'%s\\t%s' % (_('Optional Sign'), '-'), '-'),\n (u'%s\\t%s' % (_('Required Sign'), '+'), '+'),\n None,\n (u'%s\\t%s' % (_('Exponent (capital)'), 'E'), 'E'),\n (u'%s\\t%s' % (_('Exponent (small)'), 'e'), 'e')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenNumber(storedText).numStr(self.format)\n except GenNumberError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using self.format\"\"\"\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using self.format\"\"\"\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)\n\n 
def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return GenNumber(storedText).num\n except GenNumberError:\n return ''\n\n\nclass ChoiceFormat(TextFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'Choice'\n sortSequence = 20\n editSep = '/'\n #field format edit options:\n defaultFormat = '1/2/3/4'\n formatMenuList = [(u'%s\\t%s' % (_('Separator'), '/'), '/'), None,\n (u'%s\\t%s' % (_('\"/\" Character'), '//'), '//'), None,\n (u'%s\\t%s' % (_('Example'), '1/2/3/4'), '1/2/3/4')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = self.splitText(self.format)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n return [(text, '') for text in self.formatList]\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for 
setting the init default\"\"\"\n return [text for text in self.formatList]\n\n def splitText(self, textStr):\n \"\"\"Split textStr using editSep, double sep's become char\"\"\"\n return [text.strip().replace('\\0', self.editSep) for text in\n textStr.replace(self.editSep * 2, '\\0').\n split(self.editSep)]\n\n\nclass CombinationFormat(ChoiceFormat):\n \"\"\"Holds format info for a field of combinations of text options\"\"\"\n typeName = 'Combination'\n outputSepList = (',', ';', ':', '|', '/', '\\\\', '~')\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n ChoiceFormat.initFormat(self)\n fullFormat = ''.join(self.formatList)\n try:\n self.sep = [sep for sep in CombinationFormat.outputSepList\n if sep not in fullFormat][0] + ' '\n except IndexError:\n self.sep = CombinationFormat.outputSepList[0] + ' '\n\n def sortedChoices(self, inText):\n \"\"\"Return tuple of choices from inText sorted like format and\n True if all splits are valid and included\"\"\"\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n choices, valid = self.sortedChoices(storedText)\n if valid:\n result = self.sep.join(choices)\n else:\n result = _errorStr\n return TextFormat.formatOutput(self, result, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, 
True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box,\n each a tuple of edit text and any annotation text\"\"\"\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n return [entry[0] for entry in self.getEditChoices()]\n\n\nclass AutoChoiceFormat(ChoiceFormat):\n \"\"\"Holds format info for a field with one of several text options\"\"\"\n typeName = 'AutoChoice'\n #field format edit options:\n defaultFormat = ''\n formatMenuList = ()\n hasEditChoices = True\n autoAddChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.formatList = []\n\n def addChoice(self, choice, sort=False):\n 
\"\"\"Add choice to edit menu list if not already there\"\"\"\n if choice and choice not in self.formatList:\n self.formatList.append(choice)\n if sort:\n self.sortChoices()\n\n def sortChoices(self):\n \"\"\"Sort menu list choices\"\"\"\n self.formatList.sort()\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n return (storedText, True)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)\n\n\nclass DateFormat(TextFormat):\n \"\"\"Holds format info for a date field\"\"\"\n typeName = 'Date'\n sortSequence = 5\n #field format edit options:\n defaultFormat = u'mmmm d, yyyy'\n dateStampStrings = ('Now', _('Now', 'date stamp setting'))\n formatMenuList = [(u'%s\\t%s' % (_('Day (1 or 2 digits)'), 'd'), 'd'),\n (u'%s\\t%s' % (_('Day (2 digits)'), 'dd'), 'dd'),\n None,\n (u'%s\\t%s' % (_('Month (1 or 2 digits)'), 'm'), 'm'),\n (u'%s\\t%s' % (_('Month (2 digits)'), 'mm'), 'mm'),\n (u'%s\\t%s' % (_('Month Abbreviation'), 'mmm'), 'mmm'),\n (u'%s\\t%s' % (_('Month Name'), 'mmmm'), 'mmmm'),\n None,\n (u'%s\\t%s' % (_('Year (2 digits)'), 'yy'), 'yy'),\n (u'%s\\t%s' % (_('Year (4 digits)'), 'yyyy'), 'yyyy'),\n None,\n (u'%s\\t%s' % (_('Weekday (1 digit)'), 'w'), 'w'),\n (u'%s\\t%s' % (_('Weekday Abbreviation'), 'www'), 'www'),\n (u'%s\\t%s' % (_('Weekday Name'), 'wwww'), 'wwww')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n 
\"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenDate(storedText).dateStr(self.format)\n except GenDateError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and any annotation text\"\"\"\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in 
DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenDate())\n return value\n\n\nclass TimeFormat(TextFormat):\n \"\"\"Holds format info for a time field\"\"\"\n typeName = 'Time'\n sortSequence = 6\n #field format edit options:\n defaultFormat = u'h:MM:SS aa'\n timeStampStrings = ('Now', _('Now', 'time stamp setting'))\n formatMenuList = [(u'%s\\t%s' % (_('Hour (0-23, 1 or 2 digits)'), 'H'),\n 'H'),\n (u'%s\\t%s' % (_('Hour (00-23, 2 digits)'), 'HH'), 'HH'),\n (u'%s\\t%s' % (_('Hour (1-12, 1 or 2 digits)'), 'h'),\n 'h'),\n (u'%s\\t%s' % (_('Hour (01-12, 2 digits)'), 'hh'), 'hh'),\n None,\n (u'%s\\t%s' % (_('Minute (1 or 2 digits)'), 'M'), 'M'),\n (u'%s\\t%s' % (_('Minute (2 digits)'), 'MM'), 'MM'),\n None,\n (u'%s\\t%s' % (_('Second (1 or 2 digits)'), 'S'), 'S'),\n (u'%s\\t%s' % (_('Second (2 digits)'), 'SS'), 'SS'),\n (u'%s\\t%s' % (_('Fractional Seconds'), 's'), 's'),\n None,\n (u'%s\\t%s' % (_('AM/PM'), 'AA'), 'AA'),\n (u'%s\\t%s' % (_('am/pm'), 'aa'),'aa')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n try:\n text = GenTime(storedText).timeStr(self.format)\n except GenTimeError:\n text = _errorStr\n return TextFormat.formatOutput(self, text, titleMode, internal)\n\n def formatEditText(self, storedText):\n 
\"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)\n\n def getEditChoices(self, currentText=''):\n \"\"\"Return list of choices for combo box, \n each a tuple of edit text and annotated text\"\"\"\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices\n\n def getInitDefault(self):\n \"\"\"Return initial stored value for new nodes\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return GenTime().timeStr()\n return TextFormat.getInitDefault(self)\n\n def setInitDefault(self, editText):\n \"\"\"Set initial value from editor version using edit format option\"\"\"\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)\n\n def getEditInitDefault(self):\n \"\"\"Return initial value in edit format, found in edit format option\"\"\"\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)\n\n def initDefaultChoices(self):\n \"\"\"Return a list of choices for setting the init default\"\"\"\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices\n\n def adjustedCompareValue(self, value):\n \"\"\"Return conditional comparison value 
with real-time adjustments,\n used for date and time types' 'now' value\"\"\"\n if value.startswith('now'):\n return repr(GenTime())\n return value\n\n\nclass BooleanFormat(ChoiceFormat):\n \"\"\"Holds format info for a bool field\"\"\"\n typeName = 'Boolean'\n sortSequence = 1\n #field format edit options:\n defaultFormat = _('yes/no')\n formatMenuList = [(_('true/false'), _('true/false')),\n (_('T/F'), _('T/F')), None,\n (_('yes/no'), _('yes/no')),\n (_('Y/N'), _('Y/N')), None,\n ('1/0', '1/0')]\n hasEditChoices = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n ChoiceFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return formatted text, properly escaped if not in titleMode\"\"\"\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)\n\n def formatEditText(self, storedText):\n \"\"\"Return tuple of text in edit format and bool validity,\n using edit format option\"\"\"\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)\n\n def storedText(self, editText):\n \"\"\"Return tuple of stored text from edited text and bool validity,\n using edit format option\"\"\"\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return repr(GenBoolean(storedText))\n except GenBooleanError:\n return ''\n\n\nclass UniqueIDFormat(TextFormat):\n \"\"\"An unique ID 
automatically generated for new nodes\"\"\"\n typeName = 'UniqueID'\n sortSequence = 10\n formatRe = re.compile('([^0-9]*)([0-9]+)(.*)')\n #field format edit options:\n defaultFormat = u'0001'\n formatMenuList = [(u'%s\\t%s' % (_('Required Digit'), '0'), '0'), None,\n (u'%s\\t%s' % (_('Start Num Example'), '0100'), '0100'),\n (u'%s\\t%s' % (_('Prefix Example'), 'id0100'), 'id0100')]\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def nextValue(self, increment=True):\n \"\"\"Return the next value for a new node,\n increment format if increment is True\"\"\"\n try:\n prefix, numText, suffix = UniqueIDFormat.formatRe.\\\n match(self.format).groups()\n except AttributeError:\n self.format = UniqueIDFormat.defaultFormat\n return self.nextValue(increment)\n value = self.format\n if increment:\n pattern = u'%%s%%0.%dd%%s' % len(numText)\n num = int(numText) + 1\n self.format = pattern % (prefix, num, suffix)\n return value\n\n def sortValue(self, data):\n \"\"\"Return value to be compared for sorting and conditionals\"\"\"\n storedText = data.get(self.name, '')\n try:\n return int(UniqueIDFormat.formatRe.match(storedText).group(2))\n except AttributeError:\n return 0\n\n\nclass URLFormat(TextFormat):\n \"\"\"Holds format info for a field with a URL path\"\"\"\n typeName = 'URL'\n sortSequence = 8\n htmlOption = False\n allowAltLinkText = True\n hasMethodRe = re.compile('[a-zA-Z][a-zA-Z]+:|#')\n URLMethod = u'http://'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if 
self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:for-each select = \"./%s\">%s<xsl:choose>'\\\n '<xsl:when test=\"contains(., \\':\\')\"><a href=\"{.}\">'\\\n '<xsl:value-of select=\".\"/></a></xsl:when><xsl:otherwise>'\\\n '<a href=\"%s{.}\"><xsl:value-of select=\".\"/></a>'\\\n '</xsl:otherwise></xsl:choose>%s</xsl:for-each>' % \\\n (self.name, xslEscape(self.prefix), self.URLMethod,\n xslEscape(self.suffix))\n\n\nclass PathFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Path'\n URLMethod = u'file:///'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass EmailFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'Email'\n URLMethod = u'mailto:'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, 
attrs)\n\n\nclass InternalLinkFormat(URLFormat):\n \"\"\"Holds format info for a field with a local path\"\"\"\n typeName = 'InternalLink'\n URLMethod = u'#'\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n\nclass ExecuteLinkFormat(URLFormat):\n \"\"\"Holds format info for an executable field\"\"\"\n typeName = 'ExecuteLink'\n URLMethod = u'exec:'\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n URLFormat.__init__(self, name, attrs)\n\n def formatOutput(self, storedText, titleMode, altText='', internal=False):\n \"\"\"Return formatted text, properly escaped and with\n a link reference if not in titleMode\"\"\"\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return TextFormat.xslText(self)\n\n\nclass PictureFormat(TextFormat):\n \"\"\"Holds format info for a field with a link to a picture\"\"\"\n typeName = 'Picture'\n sortSequence = 8\n htmlOption = False\n hasFileBrowse = True\n\n def __init__(self, name, attrs={}):\n \"\"\"Any format, prefix, suffix, html info in attrs dict\"\"\"\n TextFormat.__init__(self, name, attrs)\n\n def initFormat(self):\n \"\"\"Called by base init, after class change or format text change\"\"\"\n self.html = True\n\n def formatOutput(self, storedText, titleMode, internal=False):\n \"\"\"Return 
formatted text, properly escaped and with\n a link to the picture if not in titleMode\"\"\"\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)\n\n\nclass ParentFormat(TextFormat):\n \"\"\"Placeholder format for references to specific parents\"\"\"\n typeName = 'Parent'\n\n def __init__(self, name, parentLevel=1):\n TextFormat.__init__(self, name, {})\n self.parentLevel = parentLevel\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {* *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*%s%s*}' % (self.parentLevel * '*', name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n for num in range(self.parentLevel):\n item = item.parent\n if not item:\n return ''\n field = item.nodeFormat().findField(self.name)\n if not field:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"%s%s\"/>' % (self.parentLevel * '../',\n self.name)\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(%s%s)' % (self.parentLevel * '../', self.name)\n\n\nclass AncestorFormat(TextFormat):\n \"\"\"Placeholder format for references to any parent with data\"\"\"\n typeName = 'Ancestor'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = 1000\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*?%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n field = None\n while not field:\n item = item.parent\n if item:\n field = item.nodeFormat().findField(self.name)\n else:\n return ''\n return field.outputText(item, titleMode, internal)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"ancestor::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(ancestor::*/%s)' % self.name\n\n\nclass ChildFormat(TextFormat):\n \"\"\"Placeholder format for references to a sequence of child data\"\"\"\n typeName = 'Child'\n\n def __init__(self, name):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -1\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? 
*} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*&%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n result = []\n for child in item.childList:\n field = child.nodeFormat().findField(self.name)\n if field:\n text = field.outputText(child, titleMode, internal)\n if text:\n result.append(text)\n return globalref.docRef.childFieldSep.join(result)\n\n def xslText(self):\n \"\"\"Return what we need to write into an XSL file for this type\"\"\"\n return u'<xsl:value-of select=\"child::*/%s\"/>' % self.name\n\n def xslTestText(self):\n \"\"\"Return XSL file test for data existance\"\"\"\n return u'normalize-space(child::*/%s)' % self.name\n\n\nclass CountFormat(TextFormat):\n \"\"\"Placeholder format for a count of children at the given level\"\"\"\n typeName = 'Count'\n\n def __init__(self, name, level):\n TextFormat.__init__(self, name, {})\n self.parentLevel = -level\n\n def sepName(self, englishOnly=False):\n \"\"\"Return name enclosed with {*? *} separators\"\"\"\n name = englishOnly and self.enName or self.name\n return u'{*#%s*}' % (name)\n\n def outputText(self, item, titleMode, internal=False):\n \"\"\"Return formatted text for this field\"\"\"\n return repr(len(item.descendLevelList(-self.parentLevel)))\n", "step-ids": [ 100, 168, 171, 173, 175 ] }
[ 100, 168, 171, 173, 175 ]