code (string, lengths 13 to 6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1 to 5)
---|---|---|---
import nltk
import spacy
import textacy
from keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization
from keras_preprocessing.sequence import pad_sequences
from keras_preprocessing.text import Tokenizer
from nltk import word_tokenize, re
from rasa import model
import pandas as pd
from spacy import lemmatizer
nlp = spacy.load('en_core_web_sm')
text=input("Enter the text to find the triplet: ")
str=nlp(text)
def load_dataset(filename):
df = pd.read_csv(filename, encoding="latin1",
names=["Sentence", "Intent"])
intent = df["Intent"]
unique_intent = list(set(intent))
sentences = list(df["Sentence"])
return (intent, unique_intent, sentences)
def cleaning(sentences):
words = []
for s in sentences:
clean = re.sub(r'[^ a-z A-Z 0-9]', " ", s)
w = nltk.word_tokenize(clean)
# lemmatizing
words.append([lemmatizer.lemmatize(i.lower()) for i in w])
return words
def create_tokenizer(words,
filters = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~'):
token = Tokenizer(filters = filters)
token.fit_on_texts(words)
return token
def max_length(words):
return(len(max(words, key = len)))
def encoding_doc(token, words):
return(token.texts_to_sequences(words))
def findTriplets(str):
tuple_data=textacy.extract.subject_verb_object_triples(str)
return tuple_data
def creatingLists(tuple_data):
tuple_to_lists=list(tuple_data)
return tuple_to_lists
def displaySubjectVerbObject(tuples_to_lists):
for item in tuples_to_lists:
print(item)
tuple_data=findTriplets(str)
list=creatingLists(tuple_data)
displaySubjectVerbObject(list)
|
normal
|
{
"blob_id": "707855a4e07b68d9ae97c2e1dc8bfd52f11c314c",
"index": 1812,
"step-1": "<mask token>\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\n<mask token>\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\n<mask token>\ndisplaySubjectVerbObject(list)\n",
"step-3": "<mask token>\nnlp = spacy.load('en_core_web_sm')\ntext = input('Enter the text to find the triplet: ')\nstr = nlp(text)\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data = findTriplets(str)\nlist = creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)\n",
"step-4": "import nltk\nimport spacy\nimport textacy\nfrom keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.text import Tokenizer\nfrom nltk import word_tokenize, re\nfrom rasa import model\nimport pandas as pd\nfrom spacy import lemmatizer\nnlp = spacy.load('en_core_web_sm')\ntext = input('Enter the text to find the triplet: ')\nstr = nlp(text)\n\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding='latin1', names=['Sentence', 'Intent'])\n intent = df['Intent']\n unique_intent = list(set(intent))\n sentences = list(df['Sentence'])\n return intent, unique_intent, sentences\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub('[^ a-z A-Z 0-9]', ' ', s)\n w = nltk.word_tokenize(clean)\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n return words\n\n\ndef create_tokenizer(words, filters='!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~'):\n token = Tokenizer(filters=filters)\n token.fit_on_texts(words)\n return token\n\n\ndef max_length(words):\n return len(max(words, key=len))\n\n\ndef encoding_doc(token, words):\n return token.texts_to_sequences(words)\n\n\ndef findTriplets(str):\n tuple_data = textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\n\ndef creatingLists(tuple_data):\n tuple_to_lists = list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data = findTriplets(str)\nlist = creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)\n",
"step-5": "import nltk\nimport spacy\nimport textacy\nfrom keras.layers import Embedding, Bidirectional, Dense, Dropout, BatchNormalization\nfrom keras_preprocessing.sequence import pad_sequences\nfrom keras_preprocessing.text import Tokenizer\nfrom nltk import word_tokenize, re\nfrom rasa import model\nimport pandas as pd\nfrom spacy import lemmatizer\n\nnlp = spacy.load('en_core_web_sm')\n\ntext=input(\"Enter the text to find the triplet: \")\nstr=nlp(text)\n\ndef load_dataset(filename):\n df = pd.read_csv(filename, encoding=\"latin1\",\n names=[\"Sentence\", \"Intent\"])\n intent = df[\"Intent\"]\n unique_intent = list(set(intent))\n sentences = list(df[\"Sentence\"])\n\n return (intent, unique_intent, sentences)\n\n\ndef cleaning(sentences):\n words = []\n for s in sentences:\n clean = re.sub(r'[^ a-z A-Z 0-9]', \" \", s)\n w = nltk.word_tokenize(clean)\n # lemmatizing\n words.append([lemmatizer.lemmatize(i.lower()) for i in w])\n\n\n return words\n\ndef create_tokenizer(words,\n filters = '!\"#$%&()*+,-./:;<=>?@[\\]^_`{|}~'):\n token = Tokenizer(filters = filters)\n token.fit_on_texts(words)\n return token\ndef max_length(words):\n return(len(max(words, key = len)))\ndef encoding_doc(token, words):\n return(token.texts_to_sequences(words))\n\ndef findTriplets(str):\n tuple_data=textacy.extract.subject_verb_object_triples(str)\n return tuple_data\n\ndef creatingLists(tuple_data):\n tuple_to_lists=list(tuple_data)\n return tuple_to_lists\n\n\ndef displaySubjectVerbObject(tuples_to_lists):\n for item in tuples_to_lists:\n print(item)\n\n\ntuple_data=findTriplets(str)\nlist=creatingLists(tuple_data)\ndisplaySubjectVerbObject(list)",
"step-ids": [
7,
9,
10,
11,
12
]
}
|
[
7,
9,
10,
11,
12
] |
import datetime
from app.api.v2.models.db import Database
now = datetime.datetime.now()
db = Database()
cur = db.cur
class Meetup():
#meetup constructor
def __init__(self, topic, location, tags, happening_on):
self.topic = topic
self.location = location
self.tags = tags
self.happening_on = happening_on
self.created_on = now
def check_if_meetup_exists(self, topic):
query = "SELECT topic from meetups WHERE topic=%s;"
cur.execute(query, (topic,))
meetup = cur.fetchone()
if meetup:
return True
def create_meetup(self):
if self.check_if_meetup_exists(self.topic):
return False
query = "INSERT INTO meetups (topic, location, tags, happening_on, created_on) values (%s, %s, %s, %s, %s) \
RETURNING meetup_id, topic, location, tags, happening_on, created_on;"
cur.execute(
query,
(self.topic,
self.location,
self.tags,
self.happening_on,
self.created_on))
meetup = cur.fetchone()
db.conn.commit()
return meetup
def delete_meetup(meetup_id):
"""Delete a single Meetup"""
query = "DELETE FROM meetups WHERE meetup_id= '{}';".format(meetup_id)
cur.execute(query)
db.conn.commit()
@staticmethod
def get_all_meetups():
'''Method to fetch all meetups'''
query = "SELECT * from meetups;"
cur.execute(query)
meetups = cur.fetchall()
return meetups
@staticmethod
def get_meetup_by_id(meetup_id):
""" Fetch a specific meetup using meetup_id"""
query = "SELECT * from meetups where meetup_id=%s;"
cur.execute(query, (meetup_id,))
meetup = cur.fetchone()
return meetup
|
normal
|
{
"blob_id": "275f8b6ac31792a9e4bb823b61366f868e45ef4e",
"index": 6521,
"step-1": "<mask token>\n\n\nclass Meetup:\n <mask token>\n\n def check_if_meetup_exists(self, topic):\n query = 'SELECT topic from meetups WHERE topic=%s;'\n cur.execute(query, (topic,))\n meetup = cur.fetchone()\n if meetup:\n return True\n <mask token>\n <mask token>\n\n @staticmethod\n def get_all_meetups():\n \"\"\"Method to fetch all meetups\"\"\"\n query = 'SELECT * from meetups;'\n cur.execute(query)\n meetups = cur.fetchall()\n return meetups\n\n @staticmethod\n def get_meetup_by_id(meetup_id):\n \"\"\" Fetch a specific meetup using meetup_id\"\"\"\n query = 'SELECT * from meetups where meetup_id=%s;'\n cur.execute(query, (meetup_id,))\n meetup = cur.fetchone()\n return meetup\n",
"step-2": "<mask token>\n\n\nclass Meetup:\n\n def __init__(self, topic, location, tags, happening_on):\n self.topic = topic\n self.location = location\n self.tags = tags\n self.happening_on = happening_on\n self.created_on = now\n\n def check_if_meetup_exists(self, topic):\n query = 'SELECT topic from meetups WHERE topic=%s;'\n cur.execute(query, (topic,))\n meetup = cur.fetchone()\n if meetup:\n return True\n\n def create_meetup(self):\n if self.check_if_meetup_exists(self.topic):\n return False\n query = (\n 'INSERT INTO meetups (topic, location, tags, happening_on, created_on) values (%s, %s, %s, %s, %s) RETURNING meetup_id, topic, location, tags, happening_on, created_on;'\n )\n cur.execute(query, (self.topic, self.location, self.tags, self.\n happening_on, self.created_on))\n meetup = cur.fetchone()\n db.conn.commit()\n return meetup\n\n def delete_meetup(meetup_id):\n \"\"\"Delete a single Meetup\"\"\"\n query = \"DELETE FROM meetups WHERE meetup_id= '{}';\".format(meetup_id)\n cur.execute(query)\n db.conn.commit()\n\n @staticmethod\n def get_all_meetups():\n \"\"\"Method to fetch all meetups\"\"\"\n query = 'SELECT * from meetups;'\n cur.execute(query)\n meetups = cur.fetchall()\n return meetups\n\n @staticmethod\n def get_meetup_by_id(meetup_id):\n \"\"\" Fetch a specific meetup using meetup_id\"\"\"\n query = 'SELECT * from meetups where meetup_id=%s;'\n cur.execute(query, (meetup_id,))\n meetup = cur.fetchone()\n return meetup\n",
"step-3": "<mask token>\nnow = datetime.datetime.now()\ndb = Database()\ncur = db.cur\n\n\nclass Meetup:\n\n def __init__(self, topic, location, tags, happening_on):\n self.topic = topic\n self.location = location\n self.tags = tags\n self.happening_on = happening_on\n self.created_on = now\n\n def check_if_meetup_exists(self, topic):\n query = 'SELECT topic from meetups WHERE topic=%s;'\n cur.execute(query, (topic,))\n meetup = cur.fetchone()\n if meetup:\n return True\n\n def create_meetup(self):\n if self.check_if_meetup_exists(self.topic):\n return False\n query = (\n 'INSERT INTO meetups (topic, location, tags, happening_on, created_on) values (%s, %s, %s, %s, %s) RETURNING meetup_id, topic, location, tags, happening_on, created_on;'\n )\n cur.execute(query, (self.topic, self.location, self.tags, self.\n happening_on, self.created_on))\n meetup = cur.fetchone()\n db.conn.commit()\n return meetup\n\n def delete_meetup(meetup_id):\n \"\"\"Delete a single Meetup\"\"\"\n query = \"DELETE FROM meetups WHERE meetup_id= '{}';\".format(meetup_id)\n cur.execute(query)\n db.conn.commit()\n\n @staticmethod\n def get_all_meetups():\n \"\"\"Method to fetch all meetups\"\"\"\n query = 'SELECT * from meetups;'\n cur.execute(query)\n meetups = cur.fetchall()\n return meetups\n\n @staticmethod\n def get_meetup_by_id(meetup_id):\n \"\"\" Fetch a specific meetup using meetup_id\"\"\"\n query = 'SELECT * from meetups where meetup_id=%s;'\n cur.execute(query, (meetup_id,))\n meetup = cur.fetchone()\n return meetup\n",
"step-4": "import datetime\nfrom app.api.v2.models.db import Database\nnow = datetime.datetime.now()\ndb = Database()\ncur = db.cur\n\n\nclass Meetup:\n\n def __init__(self, topic, location, tags, happening_on):\n self.topic = topic\n self.location = location\n self.tags = tags\n self.happening_on = happening_on\n self.created_on = now\n\n def check_if_meetup_exists(self, topic):\n query = 'SELECT topic from meetups WHERE topic=%s;'\n cur.execute(query, (topic,))\n meetup = cur.fetchone()\n if meetup:\n return True\n\n def create_meetup(self):\n if self.check_if_meetup_exists(self.topic):\n return False\n query = (\n 'INSERT INTO meetups (topic, location, tags, happening_on, created_on) values (%s, %s, %s, %s, %s) RETURNING meetup_id, topic, location, tags, happening_on, created_on;'\n )\n cur.execute(query, (self.topic, self.location, self.tags, self.\n happening_on, self.created_on))\n meetup = cur.fetchone()\n db.conn.commit()\n return meetup\n\n def delete_meetup(meetup_id):\n \"\"\"Delete a single Meetup\"\"\"\n query = \"DELETE FROM meetups WHERE meetup_id= '{}';\".format(meetup_id)\n cur.execute(query)\n db.conn.commit()\n\n @staticmethod\n def get_all_meetups():\n \"\"\"Method to fetch all meetups\"\"\"\n query = 'SELECT * from meetups;'\n cur.execute(query)\n meetups = cur.fetchall()\n return meetups\n\n @staticmethod\n def get_meetup_by_id(meetup_id):\n \"\"\" Fetch a specific meetup using meetup_id\"\"\"\n query = 'SELECT * from meetups where meetup_id=%s;'\n cur.execute(query, (meetup_id,))\n meetup = cur.fetchone()\n return meetup\n",
"step-5": "import datetime\nfrom app.api.v2.models.db import Database\n\nnow = datetime.datetime.now()\ndb = Database()\ncur = db.cur\n\nclass Meetup():\n\n #meetup constructor\n def __init__(self, topic, location, tags, happening_on):\n self.topic = topic\n self.location = location\n self.tags = tags\n self.happening_on = happening_on\n self.created_on = now\n\n def check_if_meetup_exists(self, topic):\n query = \"SELECT topic from meetups WHERE topic=%s;\"\n cur.execute(query, (topic,))\n meetup = cur.fetchone()\n if meetup:\n return True\n\n def create_meetup(self):\n if self.check_if_meetup_exists(self.topic):\n return False\n query = \"INSERT INTO meetups (topic, location, tags, happening_on, created_on) values (%s, %s, %s, %s, %s) \\\n RETURNING meetup_id, topic, location, tags, happening_on, created_on;\"\n cur.execute(\n query,\n (self.topic,\n self.location,\n self.tags,\n self.happening_on,\n self.created_on))\n meetup = cur.fetchone()\n db.conn.commit()\n return meetup\n\n def delete_meetup(meetup_id):\n \"\"\"Delete a single Meetup\"\"\"\n query = \"DELETE FROM meetups WHERE meetup_id= '{}';\".format(meetup_id)\n cur.execute(query)\n db.conn.commit()\n\n @staticmethod\n def get_all_meetups():\n '''Method to fetch all meetups'''\n query = \"SELECT * from meetups;\"\n cur.execute(query)\n meetups = cur.fetchall()\n return meetups\n\n @staticmethod\n def get_meetup_by_id(meetup_id):\n \"\"\" Fetch a specific meetup using meetup_id\"\"\"\n query = \"SELECT * from meetups where meetup_id=%s;\"\n cur.execute(query, (meetup_id,))\n meetup = cur.fetchone()\n return meetup\n\n",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
from services.BureauActif.libbureauactif.db.Base import db, BaseModel
class BureauActifCalendarDataType(db.Model, BaseModel):
__tablename__ = "ba_calendar_data_type"
id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True,
autoincrement=True)
name = db.Column(db.String, nullable=False)
def to_json(self, ignore_fields=None, minimal=False):
if ignore_fields is None:
ignore_fields = []
return super().to_json(ignore_fields=ignore_fields)
@staticmethod
def create_defaults():
data = BureauActifCalendarDataType()
data.name = 'seating'
db.session.add(data)
data2 = BureauActifCalendarDataType()
data2.name = 'standing'
db.session.add(data2)
data3 = BureauActifCalendarDataType()
data3.name = 'positionChanges'
db.session.add(data3)
data4 = BureauActifCalendarDataType()
data4.name = 'absent'
db.session.add(data4)
db.session.commit()
|
normal
|
{
"blob_id": "83117000f5f34490cb14580a9867b1e871ccc2ae",
"index": 526,
"step-1": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <mask token>\n <mask token>\n <mask token>\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"step-3": "<mask token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"step-4": "from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"step-5": "from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = \"ba_calendar_data_type\"\n id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True,\n autoincrement=True)\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n\n db.session.commit()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
"""Welcome to my Quiz:
If you go wrong once you lose but if you give all the answers correct then you win but no CHEATING."""
)
print('Q1:-Who is the president of India?')
<|reserved_special_token_0|>
if seat in winlist:
print('woah you surely are smart you are correct!!!!')
x = x + 1
else:
print('you went wrong at the first question')
x = x - 1
print('Q2:-What is the full form of MCQ?')
<|reserved_special_token_0|>
if seat2 in winlist:
print('you are right!!!!!!')
x = x + 1
else:
print('I told you this is a hard quiz, ur answer is wrong')
x = x - 1
print("Q3:-which city is the india's largest city by population")
<|reserved_special_token_0|>
if seat3 in winlist:
print('you are right!!!')
x = x + 1
else:
print('you were wrong you lose 1 mark')
x = x - 1
print('well ' + str(ghj) + ' you have completed the quiz and scored: ' +
str(x) + ' marks')
<|reserved_special_token_1|>
ghj = input('enter your first name:')
print(
"""Welcome to my Quiz:
If you go wrong once you lose but if you give all the answers correct then you win but no CHEATING."""
)
print('Q1:-Who is the president of India?')
winlist = ('ramnath govind', 'multiple choice question',
'multiple choice questions', 'mumbai')
enter = input('enter your answer here:')
seat = enter.lower()
x = 0
if seat in winlist:
print('woah you surely are smart you are correct!!!!')
x = x + 1
else:
print('you went wrong at the first question')
x = x - 1
print('Q2:-What is the full form of MCQ?')
enter2 = input('enter your answer here:')
seat2 = enter2.lower()
if seat2 in winlist:
print('you are right!!!!!!')
x = x + 1
else:
print('I told you this is a hard quiz, ur answer is wrong')
x = x - 1
print("Q3:-which city is the india's largest city by population")
enter3 = input('enter ur answer here:')
seat3 = enter3.lower()
if seat3 in winlist:
print('you are right!!!')
x = x + 1
else:
print('you were wrong you lose 1 mark')
x = x - 1
print('well ' + str(ghj) + ' you have completed the quiz and scored: ' +
str(x) + ' marks')
<|reserved_special_token_1|>
ghj=input("enter your first name:")
print("Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.")
print("Q1:-Who is the president of India?")
winlist=("ramnath govind","multiple choice question","multiple choice questions","mumbai")
enter=input("enter your answer here:")
seat=enter.lower()
x=0
if seat in winlist:
print("woah you surely are smart you are correct!!!!")
x=x+1
else:
print("you went wrong at the first question")
x=x-1
print("Q2:-What is the full form of MCQ?")
enter2=input("enter your answer here:")
seat2=enter2.lower()
if seat2 in winlist:
print("you are right!!!!!!")
x=x+1
else:
print("I told you this is a hard quiz, ur answer is wrong")
x=x-1
print("Q3:-which city is the india's largest city by population")
enter3=input("enter ur answer here:")
seat3=enter3.lower()
if seat3 in winlist:
print("you are right!!!")
x=x+1
else:
print("you were wrong you lose 1 mark")
x=x-1
print("well " +str(ghj)+ " you have completed the quiz and scored: "+str(x)+" marks")
|
flexible
|
{
"blob_id": "351421ef6a40e3a4bd4549a1851fbf4bed9ddf30",
"index": 5024,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n \"\"\"Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\"\"\"\n )\nprint('Q1:-Who is the president of India?')\n<mask token>\nif seat in winlist:\n print('woah you surely are smart you are correct!!!!')\n x = x + 1\nelse:\n print('you went wrong at the first question')\n x = x - 1\nprint('Q2:-What is the full form of MCQ?')\n<mask token>\nif seat2 in winlist:\n print('you are right!!!!!!')\n x = x + 1\nelse:\n print('I told you this is a hard quiz, ur answer is wrong')\n x = x - 1\nprint(\"Q3:-which city is the india's largest city by population\")\n<mask token>\nif seat3 in winlist:\n print('you are right!!!')\n x = x + 1\nelse:\n print('you were wrong you lose 1 mark')\n x = x - 1\nprint('well ' + str(ghj) + ' you have completed the quiz and scored: ' +\n str(x) + ' marks')\n",
"step-3": "ghj = input('enter your first name:')\nprint(\n \"\"\"Welcome to my Quiz:\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\"\"\"\n )\nprint('Q1:-Who is the president of India?')\nwinlist = ('ramnath govind', 'multiple choice question',\n 'multiple choice questions', 'mumbai')\nenter = input('enter your answer here:')\nseat = enter.lower()\nx = 0\nif seat in winlist:\n print('woah you surely are smart you are correct!!!!')\n x = x + 1\nelse:\n print('you went wrong at the first question')\n x = x - 1\nprint('Q2:-What is the full form of MCQ?')\nenter2 = input('enter your answer here:')\nseat2 = enter2.lower()\nif seat2 in winlist:\n print('you are right!!!!!!')\n x = x + 1\nelse:\n print('I told you this is a hard quiz, ur answer is wrong')\n x = x - 1\nprint(\"Q3:-which city is the india's largest city by population\")\nenter3 = input('enter ur answer here:')\nseat3 = enter3.lower()\nif seat3 in winlist:\n print('you are right!!!')\n x = x + 1\nelse:\n print('you were wrong you lose 1 mark')\n x = x - 1\nprint('well ' + str(ghj) + ' you have completed the quiz and scored: ' +\n str(x) + ' marks')\n",
"step-4": "ghj=input(\"enter your first name:\")\r\nprint(\"Welcome to my Quiz:\\nIf you go wrong once you lose but if you give all the answers correct then you win but no CHEATING.\")\r\nprint(\"Q1:-Who is the president of India?\")\r\nwinlist=(\"ramnath govind\",\"multiple choice question\",\"multiple choice questions\",\"mumbai\")\r\nenter=input(\"enter your answer here:\")\r\nseat=enter.lower()\r\nx=0\r\nif seat in winlist:\r\n print(\"woah you surely are smart you are correct!!!!\")\r\n x=x+1\r\nelse:\r\n print(\"you went wrong at the first question\")\r\n x=x-1\r\nprint(\"Q2:-What is the full form of MCQ?\")\r\nenter2=input(\"enter your answer here:\")\r\nseat2=enter2.lower()\r\nif seat2 in winlist:\r\n print(\"you are right!!!!!!\")\r\n x=x+1\r\nelse:\r\n print(\"I told you this is a hard quiz, ur answer is wrong\")\r\n x=x-1\r\nprint(\"Q3:-which city is the india's largest city by population\")\r\nenter3=input(\"enter ur answer here:\")\r\nseat3=enter3.lower()\r\nif seat3 in winlist:\r\n print(\"you are right!!!\")\r\n x=x+1\r\nelse:\r\n print(\"you were wrong you lose 1 mark\")\r\n x=x-1\r\nprint(\"well \" +str(ghj)+ \" you have completed the quiz and scored: \"+str(x)+\" marks\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# On CI, you can pass the logging and the password of dockerhub through
# the environment variables DOCKER_USERNAME and DOCKER_PASSWORD
import getpass
import os
import subprocess
import sys
from builtins import input
SCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))
ROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))
def main(arguments):
docker = [
('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'),
('Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra'),
]
docker_username = os.environ.get('DOCKER_USERNAME', None)
if docker_username is None:
docker_username = input('docker hub user (DOCKER_USERNAME) ? ')
docker_password = os.environ.get('DOCKER_PASSWORD', None)
if docker_password is None:
docker_password = getpass.getpass('docker hub password (DOCKER_PASSWORD) ? ')
_system('docker login -u {0} -p {1}'.format(docker_username, docker_password), logged=False)
for docker_file, docker_image in docker:
_system('docker build -f {0} -t {1} {2}'.format(docker_file, docker_image, ROOT_DIR))
for _, docker_image in docker:
_system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))
_system('docker push {1}/{0}'.format(docker_image, docker_username))
def _system(cmd, logged = True):
if logged:
print('$ {0}'.format(cmd))
if os.system(cmd) > 0:
raise OSError()
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
normal
|
{
"blob_id": "1ad40ef3aa7c81b6eee4fe0b98bcdd2f1110ef8d",
"index": 5990,
"step-1": "<mask token>\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n",
"step-3": "<mask token>\nSCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))\nROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n",
"step-4": "import getpass\nimport os\nimport subprocess\nimport sys\nfrom builtins import input\nSCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))\nROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))\n\n\ndef main(arguments):\n docker = [('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'), (\n 'Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra')]\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass(\n 'docker hub password (DOCKER_PASSWORD) ? ')\n _system('docker login -u {0} -p {1}'.format(docker_username,\n docker_password), logged=False)\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file,\n docker_image, ROOT_DIR))\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\n\ndef _system(cmd, logged=True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n",
"step-5": "#!/usr/bin/env python\n# On CI, you can pass the logging and the password of dockerhub through\n# the environment variables DOCKER_USERNAME and DOCKER_PASSWORD\n\nimport getpass\nimport os\nimport subprocess\nimport sys\n\nfrom builtins import input\n\nSCRIPT_DIR = os.path.realpath(os.path.join(__file__, '..'))\nROOT_DIR = os.path.realpath(os.path.join(SCRIPT_DIR, '..'))\n\ndef main(arguments):\n docker = [\n ('Dockerfile.ubuntu1804', 'ubuntu1804_ansible_testinfra'),\n ('Dockerfile.ubuntu1604', 'ubuntu1604_ansible_testinfra'),\n ]\n\n docker_username = os.environ.get('DOCKER_USERNAME', None)\n if docker_username is None:\n docker_username = input('docker hub user (DOCKER_USERNAME) ? ')\n\n docker_password = os.environ.get('DOCKER_PASSWORD', None)\n if docker_password is None:\n docker_password = getpass.getpass('docker hub password (DOCKER_PASSWORD) ? ')\n\n _system('docker login -u {0} -p {1}'.format(docker_username, docker_password), logged=False)\n\n for docker_file, docker_image in docker:\n _system('docker build -f {0} -t {1} {2}'.format(docker_file, docker_image, ROOT_DIR))\n\n for _, docker_image in docker:\n _system('docker tag {0} {1}/{0}'.format(docker_image, docker_username))\n _system('docker push {1}/{0}'.format(docker_image, docker_username))\n\ndef _system(cmd, logged = True):\n if logged:\n print('$ {0}'.format(cmd))\n if os.system(cmd) > 0:\n raise OSError()\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for c in range(1, 11):
tree = DecisionTreeClassifier(max_depth=4, random_state=c)
model.append(tree.fit(X_train, y_train))
<|reserved_special_token_0|>
for a in model:
in_sample_accuracy.append(a.score(X_train, y_train))
out_of_sample_accuracy.append(a.score(X_test, y_test))
<|reserved_special_token_0|>
a.append('mean')
a.append('standard')
in_sample_accuracy.append(np.mean(in_sample_accuracy))
in_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))
out_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))
out_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))
<|reserved_special_token_0|>
pd.set_option('precision', 3)
b
<|reserved_special_token_0|>
CVS.append(score)
pd.set_option('precision', 3)
<|reserved_special_token_0|>
dt.fit(X_train, y_train)
<|reserved_special_token_0|>
c
print('My name is Fengkai Xu')
print('My NetID is: fengkai4')
print(
'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
iris_dataset = load_iris()
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,
stratify=y, random_state=42)
model = []
for c in range(1, 11):
tree = DecisionTreeClassifier(max_depth=4, random_state=c)
model.append(tree.fit(X_train, y_train))
in_sample_accuracy = []
out_of_sample_accuracy = []
for a in model:
in_sample_accuracy.append(a.score(X_train, y_train))
out_of_sample_accuracy.append(a.score(X_test, y_test))
a = list(range(1, 11))
a.append('mean')
a.append('standard')
in_sample_accuracy.append(np.mean(in_sample_accuracy))
in_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))
out_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))
out_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))
b = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,
index=['in_sample_accuracy', 'out_of_sample_accuracy'])
pd.set_option('precision', 3)
b
CVS = []
score = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,
y_train, cv=10)
CVS.append(score)
pd.set_option('precision', 3)
c = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',
'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])
c['mean'] = c.mean(1)
c['standard'] = c.std(1)
dt = DecisionTreeClassifier(max_depth=4)
dt.fit(X_train, y_train)
c['Out-of-sample-accuracy'] = dt.score(X_test, y_test)
c
print('My name is Fengkai Xu')
print('My NetID is: fengkai4')
print(
'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'
)
<|reserved_special_token_1|>
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
iris_dataset = load_iris()
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,
stratify=y, random_state=42)
model = []
for c in range(1, 11):
tree = DecisionTreeClassifier(max_depth=4, random_state=c)
model.append(tree.fit(X_train, y_train))
in_sample_accuracy = []
out_of_sample_accuracy = []
for a in model:
in_sample_accuracy.append(a.score(X_train, y_train))
out_of_sample_accuracy.append(a.score(X_test, y_test))
a = list(range(1, 11))
a.append('mean')
a.append('standard')
in_sample_accuracy.append(np.mean(in_sample_accuracy))
in_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))
out_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))
out_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))
b = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,
index=['in_sample_accuracy', 'out_of_sample_accuracy'])
pd.set_option('precision', 3)
b
CVS = []
score = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,
y_train, cv=10)
CVS.append(score)
pd.set_option('precision', 3)
c = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',
'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])
c['mean'] = c.mean(1)
c['standard'] = c.std(1)
dt = DecisionTreeClassifier(max_depth=4)
dt.fit(X_train, y_train)
c['Out-of-sample-accuracy'] = dt.score(X_test, y_test)
c
print('My name is Fengkai Xu')
print('My NetID is: fengkai4')
print(
'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'
)
<|reserved_special_token_1|>
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
iris_dataset=load_iris()
X=iris_dataset['data']
y=iris_dataset['target']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,stratify=y,random_state=42)
model=[]
for c in range(1,11):
tree=DecisionTreeClassifier(max_depth=4,random_state=c)
model.append(tree.fit(X_train,y_train))
in_sample_accuracy=[]
out_of_sample_accuracy=[]
for a in model:
in_sample_accuracy.append(a.score(X_train,y_train))
out_of_sample_accuracy.append(a.score(X_test,y_test))
a=list(range(1,11))
a.append('mean')
a.append('standard')
in_sample_accuracy.append(np.mean(in_sample_accuracy))
in_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))
out_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))
out_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))
b=pd.DataFrame([in_sample_accuracy,out_of_sample_accuracy,],
columns=a,index=['in_sample_accuracy','out_of_sample_accuracy'])
pd.set_option('precision',3)
b
#cross validation
CVS=[]
score=cross_val_score(DecisionTreeClassifier(max_depth=4),X_train,y_train,cv=10)
CVS.append(score)
pd.set_option('precision',3)
c=pd.DataFrame(CVS,columns=['result1','result2','result3','result4','result5','result6','result7','result8','result9','result 10'],)
c['mean']=c.mean(1)
c['standard']=c.std(1)
dt=DecisionTreeClassifier(max_depth=4)
dt.fit(X_train,y_train)
c['Out-of-sample-accuracy']=dt.score(X_test,y_test)
c
print("My name is Fengkai Xu")
print("My NetID is: fengkai4")
print("I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.")
|
flexible
|
{
"blob_id": "cc46485a3b5c68e4f77a2f9a033fd2ee2859b52b",
"index": 978,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\n<mask token>\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\n<mask token>\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\n<mask token>\npd.set_option('precision', 3)\nb\n<mask token>\nCVS.append(score)\npd.set_option('precision', 3)\n<mask token>\ndt.fit(X_train, y_train)\n<mask token>\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"step-3": "<mask token>\niris_dataset = load_iris()\nX = iris_dataset['data']\ny = iris_dataset['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n stratify=y, random_state=42)\nmodel = []\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\nin_sample_accuracy = []\nout_of_sample_accuracy = []\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\na = list(range(1, 11))\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\nb = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,\n index=['in_sample_accuracy', 'out_of_sample_accuracy'])\npd.set_option('precision', 3)\nb\nCVS = []\nscore = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,\n y_train, cv=10)\nCVS.append(score)\npd.set_option('precision', 3)\nc = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',\n 'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])\nc['mean'] = c.mean(1)\nc['standard'] = c.std(1)\ndt = DecisionTreeClassifier(max_depth=4)\ndt.fit(X_train, y_train)\nc['Out-of-sample-accuracy'] = dt.score(X_test, y_test)\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"step-4": "from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import cross_val_score\niris_dataset = load_iris()\nX = iris_dataset['data']\ny = iris_dataset['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n stratify=y, random_state=42)\nmodel = []\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\nin_sample_accuracy = []\nout_of_sample_accuracy = []\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\na = list(range(1, 11))\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\nb = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,\n index=['in_sample_accuracy', 'out_of_sample_accuracy'])\npd.set_option('precision', 3)\nb\nCVS = []\nscore = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,\n y_train, cv=10)\nCVS.append(score)\npd.set_option('precision', 3)\nc = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',\n 'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])\nc['mean'] = c.mean(1)\nc['standard'] = c.std(1)\ndt = DecisionTreeClassifier(max_depth=4)\ndt.fit(X_train, y_train)\nc['Out-of-sample-accuracy'] = dt.score(X_test, y_test)\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"step-5": "\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.model_selection import cross_val_score\r\niris_dataset=load_iris()\r\nX=iris_dataset['data']\r\ny=iris_dataset['target']\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,stratify=y,random_state=42)\r\nmodel=[]\r\nfor c in range(1,11):\r\n tree=DecisionTreeClassifier(max_depth=4,random_state=c)\r\n model.append(tree.fit(X_train,y_train))\r\nin_sample_accuracy=[]\r\nout_of_sample_accuracy=[]\r\nfor a in model:\r\n in_sample_accuracy.append(a.score(X_train,y_train))\r\n out_of_sample_accuracy.append(a.score(X_test,y_test))\r\n\r\na=list(range(1,11))\r\na.append('mean')\r\na.append('standard')\r\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\r\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\r\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\r\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\r\nb=pd.DataFrame([in_sample_accuracy,out_of_sample_accuracy,],\r\n columns=a,index=['in_sample_accuracy','out_of_sample_accuracy'])\r\npd.set_option('precision',3)\r\nb\r\n#cross validation\r\nCVS=[]\r\nscore=cross_val_score(DecisionTreeClassifier(max_depth=4),X_train,y_train,cv=10)\r\nCVS.append(score)\r\npd.set_option('precision',3)\r\nc=pd.DataFrame(CVS,columns=['result1','result2','result3','result4','result5','result6','result7','result8','result9','result 10'],)\r\nc['mean']=c.mean(1)\r\nc['standard']=c.std(1)\r\ndt=DecisionTreeClassifier(max_depth=4)\r\ndt.fit(X_train,y_train)\r\nc['Out-of-sample-accuracy']=dt.score(X_test,y_test)\r\nc\r\nprint(\"My name is Fengkai Xu\")\r\nprint(\"My NetID is: fengkai4\")\r\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import os
TEMP_DIR = os.path.expanduser('~/Documents/MFA')
def make_safe(value):
if isinstance(value, bool):
return str(value).lower()
return str(value)
class MonophoneConfig(object):
'''
Configuration class for monophone training
Scale options defaults to::
['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']
If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]
Otherwise, ``realign_iters`` will be::
[1, 5, 10, 15, 20, 25, 30, 35, 38]
Attributes
----------
num_iters : int
Number of training iterations to perform, defaults to 40
scale_opts : list
Options for specifying scaling in alignment
beam : int
Default beam width for alignment, defaults = 10
retry_beam : int
Beam width to fall back on if no alignment is produced, defaults to 40
max_iter_inc : int
Last iter to increase #Gauss on, defaults to 30
totgauss : int
Total number of gaussians, defaults to 1000
boost_silence : float
Factor by which to boost silence likelihoods in alignment, defaults to 1.0
realign_iters : list
List of iterations to perform alignment
stage : int
Not used
power : float
Exponent for number of gaussians according to occurrence counts, defaults to 0.25
do_fmllr : bool
Specifies whether to do speaker adaptation, defaults to False
do_lda_mllt : bool
Specifies whether to do LDA + MLLT transformation, defaults to True
'''
def __init__(self, **kwargs):
self.num_iters = 40
self.scale_opts = ['--transition-scale=1.0',
'--acoustic-scale=0.1',
'--self-loop-scale=0.1']
self.beam = 10
self.retry_beam = 40
self.max_gauss_count = 1000
self.boost_silence = 1.0
if kwargs.get('align_often', False):
self.realign_iters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14,
16, 18, 20, 23, 26, 29, 32, 35, 38]
else:
self.realign_iters = [1, 5, 10, 15, 20, 25, 30, 35, 38]
self.stage = -4
self.power = 0.25
self.do_fmllr = False
self.do_lda_mllt = False
for k, v in kwargs.items():
setattr(self, k, v)
@property
def max_iter_inc(self):
return self.num_iters - 10
@property
def inc_gauss_count(self):
return int((self.max_gauss_count - self.initial_gauss_count) / self.max_iter_inc)
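# Editorial usage sketch (not part of the original source): keyword arguments
# passed to the constructor are set as attributes, so any default above can be
# overridden per run. With the defaults, max_iter_inc = 40 - 10 = 30, and
# align_often=True selects the denser realignment schedule defined in __init__.
#
#   mono_config = MonophoneConfig(align_often=True, beam=12)
#   mono_config.beam          # 12
#   mono_config.max_iter_inc  # 30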
class TriphoneConfig(MonophoneConfig):
'''
Configuration class for triphone training
Scale options defaults to::
['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']
If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]
Otherwise, ``realign_iters`` will be::
[1, 5, 10, 15, 20, 25, 30, 35, 38]
Attributes
----------
num_iters : int
Number of training iterations to perform, defaults to 35
scale_opts : list
Options for specifying scaling in alignment
beam : int
Default beam width for alignment, defaults = 10
retry_beam : int
Beam width to fall back on if no alignment is produced, defaults to 40
max_iter_inc : int
Last iter to increase #Gauss on, defaults to 30
totgauss : int
Total number of gaussians, defaults to 1000
boost_silence : float
Factor by which to boost silence likelihoods in alignment, defaults to 1.0
realign_iters : list
List of iterations to perform alignment
stage : int
Not used
power : float
Exponent for number of gaussians according to occurrence counts, defaults to 0.25
do_fmllr : bool
Specifies whether to do speaker adaptation, defaults to False
do_lda_mllt : bool
Specifies whether to do LDA + MLLT transformation, defaults to False
num_states : int
Number of states in the decision tree, defaults to 3100
num_gauss : int
Number of gaussians in the decision tree, defaults to 50000
cluster_threshold : int
For build-tree control final bottom-up clustering of leaves, defaults to 100
'''
def __init__(self, **kwargs):
defaults = {'num_iters': 35,
'initial_gauss_count': 3100,
'max_gauss_count': 50000,
'cluster_threshold': 100,
'do_lda_mllt': False}
defaults.update(kwargs)
super(TriphoneConfig, self).__init__(**defaults)
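# Editorial usage sketch (not part of the original source): with the defaults
# above, the inherited properties work out to
#   max_iter_inc    = num_iters - 10 = 35 - 10 = 25
#   inc_gauss_count = (max_gauss_count - initial_gauss_count) / max_iter_inc
#                   = (50000 - 3100) / 25 = 1876
#
#   tri_config = TriphoneConfig()
#   tri_config.inc_gauss_count  # 1876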
class TriphoneFmllrConfig(TriphoneConfig):
'''
Configuration class for speaker-adapted triphone training
Scale options defaults to::
['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']
If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]
Otherwise, ``realign_iters`` will be::
[1, 5, 10, 15, 20, 25, 30, 35, 38]
``fmllr_iters`` defaults to::
[2, 4, 6, 12]
Attributes
----------
num_iters : int
Number of training iterations to perform, defaults to 35
scale_opts : list
Options for specifying scaling in alignment
beam : int
Default beam width for alignment, defaults = 10
retry_beam : int
Beam width to fall back on if no alignment is produced, defaults to 40
max_iter_inc : int
Last iter to increase #Gauss on, defaults to 30
totgauss : int
Total number of gaussians, defaults to 1000
boost_silence : float
Factor by which to boost silence likelihoods in alignment, defaults to 1.0
realign_iters : list
List of iterations to perform alignment
stage : int
Not used
power : float
Exponent for number of gaussians according to occurrence counts, defaults to 0.25
do_fmllr : bool
Specifies whether to do speaker adaptation, defaults to True
do_lda_mllt : bool
Specifies whether to do LDA + MLLT transformation, defaults to False
num_states : int
Number of states in the decision tree, defaults to 3100
num_gauss : int
Number of gaussians in the decision tree, defaults to 50000
cluster_threshold : int
For build-tree control final bottom-up clustering of leaves, defaults to 100
fmllr_update_type : str
Type of fMLLR estimation, defaults to ``'full'``
fmllr_iters : list
List of iterations to perform fMLLR estimation
fmllr_power : float
Defaults to 0.2
silence_weight : float
Weight on silence in fMLLR estimation
'''
def __init__(self, align_often=True, **kwargs):
defaults = {'do_fmllr': True,
'do_lda_mllt': False,
'fmllr_update_type': 'full',
'fmllr_iters': [2, 4, 6, 12],
'fmllr_power': 0.2,
'silence_weight': 0.0}
defaults.update(kwargs)
super(TriphoneFmllrConfig, self).__init__(**defaults)
# For nnets
class LdaMlltConfig(object):
'''
Configuration class for LDA + MLLT training
Scale options defaults to::
['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']
Attributes
----------
num_iters : int
Number of training iterations to perform
do_fmllr : bool
Specifies whether to do speaker adaptation, defaults to False
do_lda_mllt : bool
Specifies whether to do LDA + MLLT transformation, defaults to True
scale_opts : list
Options for specifying scaling in alignment
num_gauss : int
Number of gaussians in the decision tree, defaults to 50000
beam : int
Default beam width for alignment, defaults = 10
retry_beam : int
Beam width to fall back on if no alignment is produced, defaults to 40
cluster_threshold : int
For build-tree control final bottom-up clustering of leaves, defaults to 100
boost_silence : float
Factor by which to boost silence likelihoods in alignment, defaults to 1.0
realign_iters : list
List of iterations to perform alignment
stage : int
Not used
power : float
Exponent for number of gaussians according to occurrence counts, defaults to 0.25
randprune : float
Approximately the ratio by which we will speed up the LDA and MLLT calculations via randomized pruning
'''
def __init__(self, **kwargs):
self.num_iters = 13
self.do_fmllr = False
self.do_lda_mllt = True
self.scale_opts = ['--transition-scale=1.0',
'--acoustic-scale=0.1',
'--self-loop-scale=0.1']
self.num_gauss = 5000
self.beam = 10
self.retry_beam = 40
self.initial_gauss_count = 5000
self.cluster_threshold = -1
self.max_gauss_count = 10000
self.boost_silence = 1.0
self.realign_iters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
self.stage = -5
self.power = 0.25
self.dim = 40
self.careful = False
self.randprune = 4.0
self.splice_opts = ['--left-context=3', '--right-context=3']
self.cluster_thresh = -1
self.norm_vars = False
for k, v in kwargs.items():
setattr(self, k, v)
@property
def max_iter_inc(self):
return self.num_iters
@property
def inc_gauss_count(self):
return int((self.max_gauss_count - self.initial_gauss_count) / self.max_iter_inc)
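# Minimal sketch of the per-iteration Gaussian increment for LDA + MLLT
# training, using the defaults assigned in __init__ above:
# (max_gauss_count - initial_gauss_count) / max_iter_inc
# = (10000 - 5000) / 13, i.e. 384 Gaussians added per iteration.
def _example_lda_mllt_gauss_increment():
    config = LdaMlltConfig()
    return config.inc_gauss_count  # 384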
class DiagUbmConfig(object):
'''
Configuration class for diagonal UBM training
Attributes
----------
num_iters : int
Number of training iterations to perform
num_gselect : int
Number of Gaussian-selection indices to use while training the model
num_gauss : int
Number of Gaussians after clustering down.
'''
def __init__(self, **kwargs):
self.num_iters = 4
self.num_gselect = 30
self.num_frames = 400000
self.num_gauss = 256
self.num_iters_init = 20
self.initial_gauss_proportion = 0.5
self.subsample = 2
self.cleanup = True
self.min_gaussian_weight = 0.0001
self.remove_low_count_gaussians = True
self.num_threads = 32
self.splice_opts = ['--left-context=3', '--right-context=3']
class iVectorExtractorConfig(object):
'''
Configuration class for i-vector extractor training
Attributes
----------
ivector_dim : int
Dimension of the extracted i-vector
ivector_period : int
Number of frames between i-vector extractions
num_iters : int
Number of training iterations to perform
num_gselect : int
Gaussian-selection using diagonal model: number of Gaussians to select
posterior_scale : float
Scale on the acoustic posteriors, intended to account for inter-frame correlations
min_post : float
Minimum posterior to use (posteriors below this are pruned out)
subsample : int
Speeds up training; training on every x'th feature
max_count : int
The use of this option (e.g. --max-count 100) can make iVectors more consistent for different lengths of utterance, by scaling up the prior term when the data-count exceeds this value. The data-count is after posterior-scaling, so assuming the posterior-scale is 0.1, --max-count 100 starts having effect after 1000 frames, or 10 seconds of data.
'''
def __init__(self, **kwargs):
self.ivector_dim = 100
self.ivector_period = 10
self.num_iters = 10
self.num_gselect = 5
self.posterior_scale = 0.1
self.min_post = 0.025
self.subsample = 2
self.max_count = 0
self.num_threads = 4
self.num_processes = 4
self.splice_opts = ['--left-context=3', '--right-context=3']
self.compress = False
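# Small arithmetic sketch of the max_count note in the docstring above: the
# data-count is compared after posterior scaling, so with the default
# posterior_scale of 0.1 a (hypothetical) max_count of 100 only starts to
# matter once 100 / 0.1 = 1000 frames, roughly 10 seconds of speech at a
# 10 ms frame shift, have been accumulated. The class default of 0 disables it.
def _example_max_count_threshold(max_count=100, posterior_scale=0.1):
    return max_count / posterior_scale  # 1000.0 frames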
class NnetBasicConfig(object):
'''
Configuration class for neural network training
Attributes
----------
num_epochs : int
Number of epochs of training; number of iterations is worked out from this
iters_per_epoch : int
Number of iterations per epoch
realign_times : int
How many times to realign during training; this will equally space them over the iterations
beam : int
Default beam width for alignment
retry_beam : int
Beam width to fall back on if no alignment is produced
initial_learning_rate : float
The initial learning rate at the beginning of training
final_learning_rate : float
The final learning rate by the end of training
pnorm_input_dim : int
The input dimension of the pnorm component
pnorm_output_dim : int
The output dimension of the pnorm component
p : int
Pnorm parameter
hidden_layer_dim : int
Dimension of a hidden layer
samples_per_iter : int
        Number of samples seen per job per iteration; used when getting examples
shuffle_buffer_size : int
This "buffer_size" variable controls randomization of the samples on each iter. You could set it to 0 or to a large value for complete randomization, but this would both consume memory and cause spikes in disk I/O. Smaller is easier on disk and memory but less random. It's not a huge deal though, as samples are anyway randomized right at the start. (the point of this is to get data in different minibatches on different iterations, since in the preconditioning method, 2 samples in the same minibatch can affect each others' gradients.
add_layers_period : int
Number of iterations between addition of a new layer
num_hidden_layers : int
Number of hidden layers
randprune : float
Speeds up LDA
alpha : float
Relates to preconditioning
mix_up : int
Number of components to mix up to
prior_subset_size : int
Number of samples per job for computing priors
update_period : int
How often the preconditioning subspace is updated
num_samples_history : int
Relates to online preconditioning
preconditioning_rank_in : int
Relates to online preconditioning
preconditioning_rank_out : int
Relates to online preconditioning
'''
def __init__(self, **kwargs):
self.num_epochs = 4
self.num_epochs_extra = 5
self.num_iters_final = 20
self.iters_per_epoch = 2
self.realign_times = 0
self.beam = 10
self.retry_beam = 15000000
        self.initial_learning_rate = 0.32
        self.final_learning_rate = 0.032
self.bias_stddev = 0.5
self.pnorm_input_dim = 3000
self.pnorm_output_dim = 300
self.p = 2
self.shrink_interval = 5
self.shrink = True
self.num_frames_shrink = 2000
self.final_learning_rate_factor = 0.5
self.hidden_layer_dim = 50
self.samples_per_iter = 200000
self.shuffle_buffer_size = 5000
self.add_layers_period = 2
self.num_hidden_layers = 3
self.modify_learning_rates = False
self.last_layer_factor = 0.1
self.first_layer_factor = 1.0
self.splice_width = 3
self.randprune = 4.0
self.alpha = 4.0
self.max_change = 10.0
self.mix_up = 12000 # From run_nnet2.sh
self.prior_subset_size = 10000
self.boost_silence = 0.5
self.update_period = 4
self.num_samples_history = 2000
self.max_change_per_sample = 0.075
self.precondition_rank_in = 20
self.precondition_rank_out = 80
class MfccConfig(object):
'''
Configuration class for MFCC generation
    The ``config_dict`` currently stores two keys: ``'use-energy'``, which
    defaults to False, and ``'frame-shift'``, which defaults to 10
Parameters
----------
output_directory : str
Path to directory to save configuration files for Kaldi
kwargs : dict, optional
If specified, updates ``config_dict`` with this dictionary
Attributes
----------
config_dict : dict
Dictionary of configuration parameters
'''
def __init__(self, output_directory, job=None, kwargs=None):
if kwargs is None:
kwargs = {}
self.job = job
self.config_dict = {'use-energy': False, 'frame-shift': 10}
self.config_dict.update(kwargs)
self.output_directory = output_directory
self.write()
def update(self, kwargs):
'''
Update configuration dictionary with new dictionary
Parameters
----------
kwargs : dict
Dictionary of new parameter values
'''
self.config_dict.update(kwargs)
self.write()
@property
def config_directory(self):
path = os.path.join(self.output_directory, 'config')
os.makedirs(path, exist_ok=True)
return path
@property
def path(self):
if self.job is None:
f = 'mfcc.conf'
else:
f = 'mfcc.{}.conf'.format(self.job)
return os.path.join(self.config_directory, f)
def write(self):
'''
Write configuration dictionary to a file for use in Kaldi binaries
'''
with open(self.path, 'w', encoding='utf8') as f:
for k, v in self.config_dict.items():
f.write('--{}={}\n'.format(k, make_safe(v)))
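# Hedged usage sketch with hypothetical paths and job index: constructing an
# MfccConfig immediately writes a per-job Kaldi config file containing one
# "--key=value" line per entry in config_dict, e.g. "--use-energy=false" and
# "--frame-shift=10" (booleans are lower-cased by make_safe).
def _example_mfcc_config(output_directory='/tmp/mfa_example'):
    config = MfccConfig(output_directory, job=1)
    return config.path  # <output_directory>/config/mfcc.1.conf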
|
normal
|
{
"blob_id": "7cfca56907f0bca7fd62e506414641f942527d1a",
"index": 9624,
"step-1": "<mask token>\n\n\nclass iVectorExtractorConfig(object):\n \"\"\"\n Configuration class for i-vector extractor training\n\n Attributes\n ----------\n ivector_dim : int\n Dimension of the extracted i-vector\n ivector_period : int\n Number of frames between i-vector extractions\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Gaussian-selection using diagonal model: number of Gaussians to select\n posterior_scale : float\n Scale on the acoustic posteriors, intended to account for inter-frame correlations\n min_post : float\n Minimum posterior to use (posteriors below this are pruned out)\n subsample : int\n Speeds up training; training on every x'th feature\n max_count : int\n The use of this option (e.g. --max-count 100) can make iVectors more consistent for different lengths of utterance, by scaling up the prior term when the data-count exceeds this value. The data-count is after posterior-scaling, so assuming the posterior-scale is 0.1, --max-count 100 starts having effect after 1000 frames, or 10 seconds of data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.ivector_dim = 100\n self.ivector_period = 10\n self.num_iters = 10\n self.num_gselect = 5\n self.posterior_scale = 0.1\n self.min_post = 0.025\n self.subsample = 2\n self.max_count = 0\n self.num_threads = 4\n self.num_processes = 4\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.compress = False\n\n\nclass NnetBasicConfig(object):\n \"\"\"\n Configuration class for neural network training\n\n Attributes\n ----------\n num_epochs : int\n Number of epochs of training; number of iterations is worked out from this\n iters_per_epoch : int\n Number of iterations per epoch\n realign_times : int\n How many times to realign during training; this will equally space them over the iterations\n beam : int\n Default beam width for alignment\n retry_beam : int\n Beam width to fall back on if no alignment is produced\n initial_learning_rate : float\n The initial learning rate at the beginning of training\n final_learning_rate : float\n The final learning rate by the end of training\n pnorm_input_dim : int\n The input dimension of the pnorm component\n pnorm_output_dim : int\n The output dimension of the pnorm component\n p : int\n Pnorm parameter\n hidden_layer_dim : int\n Dimension of a hidden layer\n samples_per_iter : int\n Number of samples seen per job per each iteration; used when getting examples\n shuffle_buffer_size : int\n This \"buffer_size\" variable controls randomization of the samples on each iter. You could set it to 0 or to a large value for complete randomization, but this would both consume memory and cause spikes in disk I/O. Smaller is easier on disk and memory but less random. It's not a huge deal though, as samples are anyway randomized right at the start. 
(the point of this is to get data in different minibatches on different iterations, since in the preconditioning method, 2 samples in the same minibatch can affect each others' gradients.\n add_layers_period : int\n Number of iterations between addition of a new layer\n num_hidden_layers : int\n Number of hidden layers\n randprune : float\n Speeds up LDA\n alpha : float\n Relates to preconditioning\n mix_up : int\n Number of components to mix up to\n prior_subset_size : int\n Number of samples per job for computing priors\n update_period : int\n How often the preconditioning subspace is updated\n num_samples_history : int\n Relates to online preconditioning\n preconditioning_rank_in : int\n Relates to online preconditioning\n preconditioning_rank_out : int\n Relates to online preconditioning\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_epochs = 4\n self.num_epochs_extra = 5\n self.num_iters_final = 20\n self.iters_per_epoch = 2\n self.realign_times = 0\n self.beam = 10\n self.retry_beam = 15000000\n self.initial_learning_rate = 0.32\n self.final_learning_rate = 0.032\n self.bias_stddev = 0.5\n self.pnorm_input_dim = 3000\n self.pnorm_output_dim = 300\n self.p = 2\n self.shrink_interval = 5\n self.shrink = True\n self.num_frames_shrink = 2000\n self.final_learning_rate_factor = 0.5\n self.hidden_layer_dim = 50\n self.samples_per_iter = 200000\n self.shuffle_buffer_size = 5000\n self.add_layers_period = 2\n self.num_hidden_layers = 3\n self.modify_learning_rates = False\n self.last_layer_factor = 0.1\n self.first_layer_factor = 1.0\n self.splice_width = 3\n self.randprune = 4.0\n self.alpha = 4.0\n self.max_change = 10.0\n self.mix_up = 12000\n self.prior_subset_size = 10000\n self.boost_silence = 0.5\n self.update_period = 4\n self.num_samples_history = 2000\n self.max_change_per_sample = 0.075\n self.precondition_rank_in = 20\n self.precondition_rank_out = 80\n\n\nclass MfccConfig(object):\n \"\"\"\n Configuration class for MFCC generation\n\n The ``config_dict`` currently stores one key ``'use-energy'`` which\n defaults to False\n\n Parameters\n ----------\n output_directory : str\n Path to directory to save configuration files for Kaldi\n kwargs : dict, optional\n If specified, updates ``config_dict`` with this dictionary\n\n Attributes\n ----------\n config_dict : dict\n Dictionary of configuration parameters\n \"\"\"\n\n def __init__(self, output_directory, job=None, kwargs=None):\n if kwargs is None:\n kwargs = {}\n self.job = job\n self.config_dict = {'use-energy': False, 'frame-shift': 10}\n self.config_dict.update(kwargs)\n self.output_directory = output_directory\n self.write()\n\n def update(self, kwargs):\n \"\"\"\n Update configuration dictionary with new dictionary\n\n Parameters\n ----------\n kwargs : dict\n Dictionary of new parameter values\n \"\"\"\n self.config_dict.update(kwargs)\n self.write()\n\n @property\n def config_directory(self):\n path = os.path.join(self.output_directory, 'config')\n os.makedirs(path, exist_ok=True)\n return path\n\n @property\n def path(self):\n if self.job is None:\n f = 'mfcc.conf'\n else:\n f = 'mfcc.{}.conf'.format(self.job)\n return os.path.join(self.config_directory, f)\n\n def write(self):\n \"\"\"\n Write configuration dictionary to a file for use in Kaldi binaries\n \"\"\"\n with open(self.path, 'w', encoding='utf8') as f:\n for k, v in self.config_dict.items():\n f.write('--{}={}\\n'.format(k, make_safe(v)))\n",
"step-2": "<mask token>\n\n\nclass TriphoneFmllrConfig(TriphoneConfig):\n \"\"\"\n Configuration class for speaker-adapted triphone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n ``fmllr_iters`` defaults to::\n\n [2, 4, 6, 12]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 35\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to True\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to False\n num_states : int\n Number of states in the decision tree, defaults to 3100\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n fmllr_update_type : str\n Type of fMLLR estimation, defaults to ``'full'``\n fmllr_iters : list\n List of iterations to perform fMLLR estimation\n fmllr_power : float\n Defaults to 0.2\n silence_weight : float\n Weight on silence in fMLLR estimation\n \"\"\"\n\n def __init__(self, align_often=True, **kwargs):\n defaults = {'do_fmllr': True, 'do_lda_mllt': False,\n 'fmllr_update_type': 'full', 'fmllr_iters': [2, 4, 6, 12],\n 'fmllr_power': 0.2, 'silence_weight': 0.0}\n defaults.update(kwargs)\n super(TriphoneFmllrConfig, self).__init__(**defaults)\n\n\nclass LdaMlltConfig(object):\n \"\"\"\n Configuration class for LDA + MLLT training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to True\n scale_opts : list\n Options for specifying scaling in alignment\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n randprune : float\n Approximately the ratio by which we will speed up the LDA and MLLT calculations 
via randomized pruning\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_iters = 13\n self.do_fmllr = False\n self.do_lda_mllt = True\n self.scale_opts = ['--transition-scale=1.0', '--acoustic-scale=0.1',\n '--self-loop-scale=0.1']\n self.num_gauss = 5000\n self.beam = 10\n self.retry_beam = 40\n self.initial_gauss_count = 5000\n self.cluster_threshold = -1\n self.max_gauss_count = 10000\n self.boost_silence = 1.0\n self.realign_iters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n self.stage = -5\n self.power = 0.25\n self.dim = 40\n self.careful = False\n self.randprune = 4.0\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.cluster_thresh = -1\n self.norm_vars = False\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def max_iter_inc(self):\n return self.num_iters\n\n @property\n def inc_gauss_count(self):\n return int((self.max_gauss_count - self.initial_gauss_count) / self\n .max_iter_inc)\n\n\nclass DiagUbmConfig(object):\n \"\"\"\n Configuration class for diagonal UBM training\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Number of Gaussian-selection indices to use while training the model\n num_gauss : int\n Number of Gaussians after clustering down.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_iters = 4\n self.num_gselect = 30\n self.num_frames = 400000\n self.num_gauss = 256\n self.num_iters_init = 20\n self.initial_gauss_proportion = 0.5\n self.subsample = 2\n self.cleanup = True\n self.min_gaussian_weight = 0.0001\n self.remove_low_count_gaussians = True\n self.num_threads = 32\n self.splice_opts = ['--left-context=3', '--right-context=3']\n\n\nclass iVectorExtractorConfig(object):\n \"\"\"\n Configuration class for i-vector extractor training\n\n Attributes\n ----------\n ivector_dim : int\n Dimension of the extracted i-vector\n ivector_period : int\n Number of frames between i-vector extractions\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Gaussian-selection using diagonal model: number of Gaussians to select\n posterior_scale : float\n Scale on the acoustic posteriors, intended to account for inter-frame correlations\n min_post : float\n Minimum posterior to use (posteriors below this are pruned out)\n subsample : int\n Speeds up training; training on every x'th feature\n max_count : int\n The use of this option (e.g. --max-count 100) can make iVectors more consistent for different lengths of utterance, by scaling up the prior term when the data-count exceeds this value. 
The data-count is after posterior-scaling, so assuming the posterior-scale is 0.1, --max-count 100 starts having effect after 1000 frames, or 10 seconds of data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.ivector_dim = 100\n self.ivector_period = 10\n self.num_iters = 10\n self.num_gselect = 5\n self.posterior_scale = 0.1\n self.min_post = 0.025\n self.subsample = 2\n self.max_count = 0\n self.num_threads = 4\n self.num_processes = 4\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.compress = False\n\n\nclass NnetBasicConfig(object):\n \"\"\"\n Configuration class for neural network training\n\n Attributes\n ----------\n num_epochs : int\n Number of epochs of training; number of iterations is worked out from this\n iters_per_epoch : int\n Number of iterations per epoch\n realign_times : int\n How many times to realign during training; this will equally space them over the iterations\n beam : int\n Default beam width for alignment\n retry_beam : int\n Beam width to fall back on if no alignment is produced\n initial_learning_rate : float\n The initial learning rate at the beginning of training\n final_learning_rate : float\n The final learning rate by the end of training\n pnorm_input_dim : int\n The input dimension of the pnorm component\n pnorm_output_dim : int\n The output dimension of the pnorm component\n p : int\n Pnorm parameter\n hidden_layer_dim : int\n Dimension of a hidden layer\n samples_per_iter : int\n Number of samples seen per job per each iteration; used when getting examples\n shuffle_buffer_size : int\n This \"buffer_size\" variable controls randomization of the samples on each iter. You could set it to 0 or to a large value for complete randomization, but this would both consume memory and cause spikes in disk I/O. Smaller is easier on disk and memory but less random. It's not a huge deal though, as samples are anyway randomized right at the start. 
(the point of this is to get data in different minibatches on different iterations, since in the preconditioning method, 2 samples in the same minibatch can affect each others' gradients.\n add_layers_period : int\n Number of iterations between addition of a new layer\n num_hidden_layers : int\n Number of hidden layers\n randprune : float\n Speeds up LDA\n alpha : float\n Relates to preconditioning\n mix_up : int\n Number of components to mix up to\n prior_subset_size : int\n Number of samples per job for computing priors\n update_period : int\n How often the preconditioning subspace is updated\n num_samples_history : int\n Relates to online preconditioning\n preconditioning_rank_in : int\n Relates to online preconditioning\n preconditioning_rank_out : int\n Relates to online preconditioning\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_epochs = 4\n self.num_epochs_extra = 5\n self.num_iters_final = 20\n self.iters_per_epoch = 2\n self.realign_times = 0\n self.beam = 10\n self.retry_beam = 15000000\n self.initial_learning_rate = 0.32\n self.final_learning_rate = 0.032\n self.bias_stddev = 0.5\n self.pnorm_input_dim = 3000\n self.pnorm_output_dim = 300\n self.p = 2\n self.shrink_interval = 5\n self.shrink = True\n self.num_frames_shrink = 2000\n self.final_learning_rate_factor = 0.5\n self.hidden_layer_dim = 50\n self.samples_per_iter = 200000\n self.shuffle_buffer_size = 5000\n self.add_layers_period = 2\n self.num_hidden_layers = 3\n self.modify_learning_rates = False\n self.last_layer_factor = 0.1\n self.first_layer_factor = 1.0\n self.splice_width = 3\n self.randprune = 4.0\n self.alpha = 4.0\n self.max_change = 10.0\n self.mix_up = 12000\n self.prior_subset_size = 10000\n self.boost_silence = 0.5\n self.update_period = 4\n self.num_samples_history = 2000\n self.max_change_per_sample = 0.075\n self.precondition_rank_in = 20\n self.precondition_rank_out = 80\n\n\nclass MfccConfig(object):\n \"\"\"\n Configuration class for MFCC generation\n\n The ``config_dict`` currently stores one key ``'use-energy'`` which\n defaults to False\n\n Parameters\n ----------\n output_directory : str\n Path to directory to save configuration files for Kaldi\n kwargs : dict, optional\n If specified, updates ``config_dict`` with this dictionary\n\n Attributes\n ----------\n config_dict : dict\n Dictionary of configuration parameters\n \"\"\"\n\n def __init__(self, output_directory, job=None, kwargs=None):\n if kwargs is None:\n kwargs = {}\n self.job = job\n self.config_dict = {'use-energy': False, 'frame-shift': 10}\n self.config_dict.update(kwargs)\n self.output_directory = output_directory\n self.write()\n\n def update(self, kwargs):\n \"\"\"\n Update configuration dictionary with new dictionary\n\n Parameters\n ----------\n kwargs : dict\n Dictionary of new parameter values\n \"\"\"\n self.config_dict.update(kwargs)\n self.write()\n\n @property\n def config_directory(self):\n path = os.path.join(self.output_directory, 'config')\n os.makedirs(path, exist_ok=True)\n return path\n\n @property\n def path(self):\n if self.job is None:\n f = 'mfcc.conf'\n else:\n f = 'mfcc.{}.conf'.format(self.job)\n return os.path.join(self.config_directory, f)\n\n def write(self):\n \"\"\"\n Write configuration dictionary to a file for use in Kaldi binaries\n \"\"\"\n with open(self.path, 'w', encoding='utf8') as f:\n for k, v in self.config_dict.items():\n f.write('--{}={}\\n'.format(k, make_safe(v)))\n",
"step-3": "<mask token>\n\n\nclass TriphoneConfig(MonophoneConfig):\n \"\"\"\n Configuration class for triphone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 35\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to False\n num_states : int\n Number of states in the decision tree, defaults to 3100\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n \"\"\"\n\n def __init__(self, **kwargs):\n defaults = {'num_iters': 35, 'initial_gauss_count': 3100,\n 'max_gauss_count': 50000, 'cluster_threshold': 100,\n 'do_lda_mllt': False}\n defaults.update(kwargs)\n super(TriphoneConfig, self).__init__(**defaults)\n\n\nclass TriphoneFmllrConfig(TriphoneConfig):\n \"\"\"\n Configuration class for speaker-adapted triphone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n ``fmllr_iters`` defaults to::\n\n [2, 4, 6, 12]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 35\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to True\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to False\n num_states : int\n Number of states in the decision tree, defaults to 3100\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n cluster_threshold : 
int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n fmllr_update_type : str\n Type of fMLLR estimation, defaults to ``'full'``\n fmllr_iters : list\n List of iterations to perform fMLLR estimation\n fmllr_power : float\n Defaults to 0.2\n silence_weight : float\n Weight on silence in fMLLR estimation\n \"\"\"\n\n def __init__(self, align_often=True, **kwargs):\n defaults = {'do_fmllr': True, 'do_lda_mllt': False,\n 'fmllr_update_type': 'full', 'fmllr_iters': [2, 4, 6, 12],\n 'fmllr_power': 0.2, 'silence_weight': 0.0}\n defaults.update(kwargs)\n super(TriphoneFmllrConfig, self).__init__(**defaults)\n\n\nclass LdaMlltConfig(object):\n \"\"\"\n Configuration class for LDA + MLLT training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to True\n scale_opts : list\n Options for specifying scaling in alignment\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n randprune : float\n Approximately the ratio by which we will speed up the LDA and MLLT calculations via randomized pruning\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_iters = 13\n self.do_fmllr = False\n self.do_lda_mllt = True\n self.scale_opts = ['--transition-scale=1.0', '--acoustic-scale=0.1',\n '--self-loop-scale=0.1']\n self.num_gauss = 5000\n self.beam = 10\n self.retry_beam = 40\n self.initial_gauss_count = 5000\n self.cluster_threshold = -1\n self.max_gauss_count = 10000\n self.boost_silence = 1.0\n self.realign_iters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n self.stage = -5\n self.power = 0.25\n self.dim = 40\n self.careful = False\n self.randprune = 4.0\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.cluster_thresh = -1\n self.norm_vars = False\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def max_iter_inc(self):\n return self.num_iters\n\n @property\n def inc_gauss_count(self):\n return int((self.max_gauss_count - self.initial_gauss_count) / self\n .max_iter_inc)\n\n\nclass DiagUbmConfig(object):\n \"\"\"\n Configuration class for diagonal UBM training\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Number of Gaussian-selection indices to use while training the model\n num_gauss : int\n Number of Gaussians after clustering down.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_iters = 4\n self.num_gselect = 30\n self.num_frames = 400000\n self.num_gauss = 256\n self.num_iters_init = 20\n self.initial_gauss_proportion = 0.5\n self.subsample = 2\n self.cleanup = True\n self.min_gaussian_weight = 0.0001\n self.remove_low_count_gaussians = True\n self.num_threads = 32\n self.splice_opts = 
['--left-context=3', '--right-context=3']\n\n\nclass iVectorExtractorConfig(object):\n \"\"\"\n Configuration class for i-vector extractor training\n\n Attributes\n ----------\n ivector_dim : int\n Dimension of the extracted i-vector\n ivector_period : int\n Number of frames between i-vector extractions\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Gaussian-selection using diagonal model: number of Gaussians to select\n posterior_scale : float\n Scale on the acoustic posteriors, intended to account for inter-frame correlations\n min_post : float\n Minimum posterior to use (posteriors below this are pruned out)\n subsample : int\n Speeds up training; training on every x'th feature\n max_count : int\n The use of this option (e.g. --max-count 100) can make iVectors more consistent for different lengths of utterance, by scaling up the prior term when the data-count exceeds this value. The data-count is after posterior-scaling, so assuming the posterior-scale is 0.1, --max-count 100 starts having effect after 1000 frames, or 10 seconds of data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.ivector_dim = 100\n self.ivector_period = 10\n self.num_iters = 10\n self.num_gselect = 5\n self.posterior_scale = 0.1\n self.min_post = 0.025\n self.subsample = 2\n self.max_count = 0\n self.num_threads = 4\n self.num_processes = 4\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.compress = False\n\n\nclass NnetBasicConfig(object):\n \"\"\"\n Configuration class for neural network training\n\n Attributes\n ----------\n num_epochs : int\n Number of epochs of training; number of iterations is worked out from this\n iters_per_epoch : int\n Number of iterations per epoch\n realign_times : int\n How many times to realign during training; this will equally space them over the iterations\n beam : int\n Default beam width for alignment\n retry_beam : int\n Beam width to fall back on if no alignment is produced\n initial_learning_rate : float\n The initial learning rate at the beginning of training\n final_learning_rate : float\n The final learning rate by the end of training\n pnorm_input_dim : int\n The input dimension of the pnorm component\n pnorm_output_dim : int\n The output dimension of the pnorm component\n p : int\n Pnorm parameter\n hidden_layer_dim : int\n Dimension of a hidden layer\n samples_per_iter : int\n Number of samples seen per job per each iteration; used when getting examples\n shuffle_buffer_size : int\n This \"buffer_size\" variable controls randomization of the samples on each iter. You could set it to 0 or to a large value for complete randomization, but this would both consume memory and cause spikes in disk I/O. Smaller is easier on disk and memory but less random. It's not a huge deal though, as samples are anyway randomized right at the start. 
(the point of this is to get data in different minibatches on different iterations, since in the preconditioning method, 2 samples in the same minibatch can affect each others' gradients.\n add_layers_period : int\n Number of iterations between addition of a new layer\n num_hidden_layers : int\n Number of hidden layers\n randprune : float\n Speeds up LDA\n alpha : float\n Relates to preconditioning\n mix_up : int\n Number of components to mix up to\n prior_subset_size : int\n Number of samples per job for computing priors\n update_period : int\n How often the preconditioning subspace is updated\n num_samples_history : int\n Relates to online preconditioning\n preconditioning_rank_in : int\n Relates to online preconditioning\n preconditioning_rank_out : int\n Relates to online preconditioning\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_epochs = 4\n self.num_epochs_extra = 5\n self.num_iters_final = 20\n self.iters_per_epoch = 2\n self.realign_times = 0\n self.beam = 10\n self.retry_beam = 15000000\n self.initial_learning_rate = 0.32\n self.final_learning_rate = 0.032\n self.bias_stddev = 0.5\n self.pnorm_input_dim = 3000\n self.pnorm_output_dim = 300\n self.p = 2\n self.shrink_interval = 5\n self.shrink = True\n self.num_frames_shrink = 2000\n self.final_learning_rate_factor = 0.5\n self.hidden_layer_dim = 50\n self.samples_per_iter = 200000\n self.shuffle_buffer_size = 5000\n self.add_layers_period = 2\n self.num_hidden_layers = 3\n self.modify_learning_rates = False\n self.last_layer_factor = 0.1\n self.first_layer_factor = 1.0\n self.splice_width = 3\n self.randprune = 4.0\n self.alpha = 4.0\n self.max_change = 10.0\n self.mix_up = 12000\n self.prior_subset_size = 10000\n self.boost_silence = 0.5\n self.update_period = 4\n self.num_samples_history = 2000\n self.max_change_per_sample = 0.075\n self.precondition_rank_in = 20\n self.precondition_rank_out = 80\n\n\nclass MfccConfig(object):\n \"\"\"\n Configuration class for MFCC generation\n\n The ``config_dict`` currently stores one key ``'use-energy'`` which\n defaults to False\n\n Parameters\n ----------\n output_directory : str\n Path to directory to save configuration files for Kaldi\n kwargs : dict, optional\n If specified, updates ``config_dict`` with this dictionary\n\n Attributes\n ----------\n config_dict : dict\n Dictionary of configuration parameters\n \"\"\"\n\n def __init__(self, output_directory, job=None, kwargs=None):\n if kwargs is None:\n kwargs = {}\n self.job = job\n self.config_dict = {'use-energy': False, 'frame-shift': 10}\n self.config_dict.update(kwargs)\n self.output_directory = output_directory\n self.write()\n\n def update(self, kwargs):\n \"\"\"\n Update configuration dictionary with new dictionary\n\n Parameters\n ----------\n kwargs : dict\n Dictionary of new parameter values\n \"\"\"\n self.config_dict.update(kwargs)\n self.write()\n\n @property\n def config_directory(self):\n path = os.path.join(self.output_directory, 'config')\n os.makedirs(path, exist_ok=True)\n return path\n\n @property\n def path(self):\n if self.job is None:\n f = 'mfcc.conf'\n else:\n f = 'mfcc.{}.conf'.format(self.job)\n return os.path.join(self.config_directory, f)\n\n def write(self):\n \"\"\"\n Write configuration dictionary to a file for use in Kaldi binaries\n \"\"\"\n with open(self.path, 'w', encoding='utf8') as f:\n for k, v in self.config_dict.items():\n f.write('--{}={}\\n'.format(k, make_safe(v)))\n",
"step-4": "<mask token>\n\n\nclass MonophoneConfig(object):\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def inc_gauss_count(self):\n return int((self.max_gauss_count - self.initial_gauss_count) / self\n .max_iter_inc)\n\n\nclass TriphoneConfig(MonophoneConfig):\n \"\"\"\n Configuration class for triphone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 35\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to False\n num_states : int\n Number of states in the decision tree, defaults to 3100\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n \"\"\"\n\n def __init__(self, **kwargs):\n defaults = {'num_iters': 35, 'initial_gauss_count': 3100,\n 'max_gauss_count': 50000, 'cluster_threshold': 100,\n 'do_lda_mllt': False}\n defaults.update(kwargs)\n super(TriphoneConfig, self).__init__(**defaults)\n\n\nclass TriphoneFmllrConfig(TriphoneConfig):\n \"\"\"\n Configuration class for speaker-adapted triphone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n ``fmllr_iters`` defaults to::\n\n [2, 4, 6, 12]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 35\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to True\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT 
transformation, default to False\n num_states : int\n Number of states in the decision tree, defaults to 3100\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n fmllr_update_type : str\n Type of fMLLR estimation, defaults to ``'full'``\n fmllr_iters : list\n List of iterations to perform fMLLR estimation\n fmllr_power : float\n Defaults to 0.2\n silence_weight : float\n Weight on silence in fMLLR estimation\n \"\"\"\n\n def __init__(self, align_often=True, **kwargs):\n defaults = {'do_fmllr': True, 'do_lda_mllt': False,\n 'fmllr_update_type': 'full', 'fmllr_iters': [2, 4, 6, 12],\n 'fmllr_power': 0.2, 'silence_weight': 0.0}\n defaults.update(kwargs)\n super(TriphoneFmllrConfig, self).__init__(**defaults)\n\n\nclass LdaMlltConfig(object):\n \"\"\"\n Configuration class for LDA + MLLT training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to True\n scale_opts : list\n Options for specifying scaling in alignment\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n randprune : float\n Approximately the ratio by which we will speed up the LDA and MLLT calculations via randomized pruning\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_iters = 13\n self.do_fmllr = False\n self.do_lda_mllt = True\n self.scale_opts = ['--transition-scale=1.0', '--acoustic-scale=0.1',\n '--self-loop-scale=0.1']\n self.num_gauss = 5000\n self.beam = 10\n self.retry_beam = 40\n self.initial_gauss_count = 5000\n self.cluster_threshold = -1\n self.max_gauss_count = 10000\n self.boost_silence = 1.0\n self.realign_iters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n self.stage = -5\n self.power = 0.25\n self.dim = 40\n self.careful = False\n self.randprune = 4.0\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.cluster_thresh = -1\n self.norm_vars = False\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def max_iter_inc(self):\n return self.num_iters\n\n @property\n def inc_gauss_count(self):\n return int((self.max_gauss_count - self.initial_gauss_count) / self\n .max_iter_inc)\n\n\nclass DiagUbmConfig(object):\n \"\"\"\n Configuration class for diagonal UBM training\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Number of Gaussian-selection indices to use while training the model\n num_gauss : int\n Number of Gaussians after clustering down.\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_iters = 4\n self.num_gselect = 30\n self.num_frames = 400000\n self.num_gauss = 256\n 
self.num_iters_init = 20\n self.initial_gauss_proportion = 0.5\n self.subsample = 2\n self.cleanup = True\n self.min_gaussian_weight = 0.0001\n self.remove_low_count_gaussians = True\n self.num_threads = 32\n self.splice_opts = ['--left-context=3', '--right-context=3']\n\n\nclass iVectorExtractorConfig(object):\n \"\"\"\n Configuration class for i-vector extractor training\n\n Attributes\n ----------\n ivector_dim : int\n Dimension of the extracted i-vector\n ivector_period : int\n Number of frames between i-vector extractions\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Gaussian-selection using diagonal model: number of Gaussians to select\n posterior_scale : float\n Scale on the acoustic posteriors, intended to account for inter-frame correlations\n min_post : float\n Minimum posterior to use (posteriors below this are pruned out)\n subsample : int\n Speeds up training; training on every x'th feature\n max_count : int\n The use of this option (e.g. --max-count 100) can make iVectors more consistent for different lengths of utterance, by scaling up the prior term when the data-count exceeds this value. The data-count is after posterior-scaling, so assuming the posterior-scale is 0.1, --max-count 100 starts having effect after 1000 frames, or 10 seconds of data.\n \"\"\"\n\n def __init__(self, **kwargs):\n self.ivector_dim = 100\n self.ivector_period = 10\n self.num_iters = 10\n self.num_gselect = 5\n self.posterior_scale = 0.1\n self.min_post = 0.025\n self.subsample = 2\n self.max_count = 0\n self.num_threads = 4\n self.num_processes = 4\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.compress = False\n\n\nclass NnetBasicConfig(object):\n \"\"\"\n Configuration class for neural network training\n\n Attributes\n ----------\n num_epochs : int\n Number of epochs of training; number of iterations is worked out from this\n iters_per_epoch : int\n Number of iterations per epoch\n realign_times : int\n How many times to realign during training; this will equally space them over the iterations\n beam : int\n Default beam width for alignment\n retry_beam : int\n Beam width to fall back on if no alignment is produced\n initial_learning_rate : float\n The initial learning rate at the beginning of training\n final_learning_rate : float\n The final learning rate by the end of training\n pnorm_input_dim : int\n The input dimension of the pnorm component\n pnorm_output_dim : int\n The output dimension of the pnorm component\n p : int\n Pnorm parameter\n hidden_layer_dim : int\n Dimension of a hidden layer\n samples_per_iter : int\n Number of samples seen per job per each iteration; used when getting examples\n shuffle_buffer_size : int\n This \"buffer_size\" variable controls randomization of the samples on each iter. You could set it to 0 or to a large value for complete randomization, but this would both consume memory and cause spikes in disk I/O. Smaller is easier on disk and memory but less random. It's not a huge deal though, as samples are anyway randomized right at the start. 
(the point of this is to get data in different minibatches on different iterations, since in the preconditioning method, 2 samples in the same minibatch can affect each others' gradients.\n add_layers_period : int\n Number of iterations between addition of a new layer\n num_hidden_layers : int\n Number of hidden layers\n randprune : float\n Speeds up LDA\n alpha : float\n Relates to preconditioning\n mix_up : int\n Number of components to mix up to\n prior_subset_size : int\n Number of samples per job for computing priors\n update_period : int\n How often the preconditioning subspace is updated\n num_samples_history : int\n Relates to online preconditioning\n preconditioning_rank_in : int\n Relates to online preconditioning\n preconditioning_rank_out : int\n Relates to online preconditioning\n\n \"\"\"\n\n def __init__(self, **kwargs):\n self.num_epochs = 4\n self.num_epochs_extra = 5\n self.num_iters_final = 20\n self.iters_per_epoch = 2\n self.realign_times = 0\n self.beam = 10\n self.retry_beam = 15000000\n self.initial_learning_rate = 0.32\n self.final_learning_rate = 0.032\n self.bias_stddev = 0.5\n self.pnorm_input_dim = 3000\n self.pnorm_output_dim = 300\n self.p = 2\n self.shrink_interval = 5\n self.shrink = True\n self.num_frames_shrink = 2000\n self.final_learning_rate_factor = 0.5\n self.hidden_layer_dim = 50\n self.samples_per_iter = 200000\n self.shuffle_buffer_size = 5000\n self.add_layers_period = 2\n self.num_hidden_layers = 3\n self.modify_learning_rates = False\n self.last_layer_factor = 0.1\n self.first_layer_factor = 1.0\n self.splice_width = 3\n self.randprune = 4.0\n self.alpha = 4.0\n self.max_change = 10.0\n self.mix_up = 12000\n self.prior_subset_size = 10000\n self.boost_silence = 0.5\n self.update_period = 4\n self.num_samples_history = 2000\n self.max_change_per_sample = 0.075\n self.precondition_rank_in = 20\n self.precondition_rank_out = 80\n\n\nclass MfccConfig(object):\n \"\"\"\n Configuration class for MFCC generation\n\n The ``config_dict`` currently stores one key ``'use-energy'`` which\n defaults to False\n\n Parameters\n ----------\n output_directory : str\n Path to directory to save configuration files for Kaldi\n kwargs : dict, optional\n If specified, updates ``config_dict`` with this dictionary\n\n Attributes\n ----------\n config_dict : dict\n Dictionary of configuration parameters\n \"\"\"\n\n def __init__(self, output_directory, job=None, kwargs=None):\n if kwargs is None:\n kwargs = {}\n self.job = job\n self.config_dict = {'use-energy': False, 'frame-shift': 10}\n self.config_dict.update(kwargs)\n self.output_directory = output_directory\n self.write()\n\n def update(self, kwargs):\n \"\"\"\n Update configuration dictionary with new dictionary\n\n Parameters\n ----------\n kwargs : dict\n Dictionary of new parameter values\n \"\"\"\n self.config_dict.update(kwargs)\n self.write()\n\n @property\n def config_directory(self):\n path = os.path.join(self.output_directory, 'config')\n os.makedirs(path, exist_ok=True)\n return path\n\n @property\n def path(self):\n if self.job is None:\n f = 'mfcc.conf'\n else:\n f = 'mfcc.{}.conf'.format(self.job)\n return os.path.join(self.config_directory, f)\n\n def write(self):\n \"\"\"\n Write configuration dictionary to a file for use in Kaldi binaries\n \"\"\"\n with open(self.path, 'w', encoding='utf8') as f:\n for k, v in self.config_dict.items():\n f.write('--{}={}\\n'.format(k, make_safe(v)))\n",
"step-5": "import os\n\nTEMP_DIR = os.path.expanduser('~/Documents/MFA')\n\n\ndef make_safe(value):\n if isinstance(value, bool):\n return str(value).lower()\n return str(value)\n\n\nclass MonophoneConfig(object):\n '''\n Configuration class for monophone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 40\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to True\n '''\n\n def __init__(self, **kwargs):\n self.num_iters = 40\n\n self.scale_opts = ['--transition-scale=1.0',\n '--acoustic-scale=0.1',\n '--self-loop-scale=0.1']\n self.beam = 10\n self.retry_beam = 40\n self.max_gauss_count = 1000\n self.boost_silence = 1.0\n if kwargs.get('align_often', False):\n self.realign_iters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14,\n 16, 18, 20, 23, 26, 29, 32, 35, 38]\n else:\n self.realign_iters = [1, 5, 10, 15, 20, 25, 30, 35, 38]\n self.stage = -4\n self.power = 0.25\n\n self.do_fmllr = False\n self.do_lda_mllt = False\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def max_iter_inc(self):\n return self.num_iters - 10\n\n @property\n def inc_gauss_count(self):\n return int((self.max_gauss_count - self.initial_gauss_count) / self.max_iter_inc)\n\n\nclass TriphoneConfig(MonophoneConfig):\n '''\n Configuration class for triphone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 35\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 
0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to False\n num_states : int\n Number of states in the decision tree, defaults to 3100\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n '''\n\n def __init__(self, **kwargs):\n defaults = {'num_iters': 35,\n 'initial_gauss_count': 3100,\n 'max_gauss_count': 50000,\n 'cluster_threshold': 100,\n 'do_lda_mllt': False}\n defaults.update(kwargs)\n super(TriphoneConfig, self).__init__(**defaults)\n\n\nclass TriphoneFmllrConfig(TriphoneConfig):\n '''\n Configuration class for speaker-adapted triphone training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n If ``align_often`` is True in the keyword arguments, ``realign_iters`` will be::\n\n [1, 5, 10, 15, 20, 25, 30, 35, 38]\n\n Otherwise, ``realign_iters`` will be::\n\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 16, 18, 20, 23, 26, 29, 32, 35, 38]\n\n ``fmllr_iters`` defaults to::\n\n [2, 4, 6, 12]\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform, defaults to 35\n scale_opts : list\n Options for specifying scaling in alignment\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n max_iter_inc : int\n Last iter to increase #Gauss on, defaults to 30\n totgauss : int\n Total number of gaussians, defaults to 1000\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to True\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to False\n num_states : int\n Number of states in the decision tree, defaults to 3100\n num_gauss : int\n Number of gaussians in the decision tree, defaults to 50000\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n fmllr_update_type : str\n Type of fMLLR estimation, defaults to ``'full'``\n fmllr_iters : list\n List of iterations to perform fMLLR estimation\n fmllr_power : float\n Defaults to 0.2\n silence_weight : float\n Weight on silence in fMLLR estimation\n '''\n\n def __init__(self, align_often=True, **kwargs):\n defaults = {'do_fmllr': True,\n 'do_lda_mllt': False,\n 'fmllr_update_type': 'full',\n 'fmllr_iters': [2, 4, 6, 12],\n 'fmllr_power': 0.2,\n 'silence_weight': 0.0}\n defaults.update(kwargs)\n super(TriphoneFmllrConfig, self).__init__(**defaults)\n\n# For nnets\nclass LdaMlltConfig(object):\n '''\n Configuration class for LDA + MLLT training\n\n Scale options defaults to::\n\n ['--transition-scale=1.0', '--acoustic-scale=0.1', '--self-loop-scale=0.1']\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n do_fmllr : bool\n Specifies whether to do speaker adaptation, defaults to False\n do_lda_mllt : bool\n Spacifies whether to do LDA + MLLT transformation, default to True\n scale_opts : list\n Options for specifying scaling in alignment\n num_gauss : int\n Number of gaussians in the 
decision tree, defaults to 50000\n beam : int\n Default beam width for alignment, defaults = 10\n retry_beam : int\n Beam width to fall back on if no alignment is produced, defaults to 40\n cluster_threshold : int\n For build-tree control final bottom-up clustering of leaves, defaults to 100\n boost_silence : float\n Factor by which to boost silence likelihoods in alignment, defaults to 1.0\n realign_iters : list\n List of iterations to perform alignment\n stage : int\n Not used\n power : float\n Exponent for number of gaussians according to occurrence counts, defaults to 0.25\n randprune : float\n Approximately the ratio by which we will speed up the LDA and MLLT calculations via randomized pruning\n '''\n def __init__(self, **kwargs):\n self.num_iters = 13\n self.do_fmllr = False\n self.do_lda_mllt = True\n\n self.scale_opts = ['--transition-scale=1.0',\n '--acoustic-scale=0.1',\n '--self-loop-scale=0.1']\n self.num_gauss = 5000\n self.beam = 10\n self.retry_beam = 40\n self.initial_gauss_count = 5000\n self.cluster_threshold = -1\n self.max_gauss_count = 10000\n self.boost_silence = 1.0\n self.realign_iters = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n self.stage = -5\n self.power = 0.25\n\n self.dim = 40\n self.careful = False\n self.randprune = 4.0\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.cluster_thresh = -1\n self.norm_vars = False\n\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n @property\n def max_iter_inc(self):\n return self.num_iters\n\n @property\n def inc_gauss_count(self):\n return int((self.max_gauss_count - self.initial_gauss_count) / self.max_iter_inc)\n\nclass DiagUbmConfig(object):\n '''\n Configuration class for diagonal UBM training\n\n Attributes\n ----------\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Number of Gaussian-selection indices to use while training the model\n num_gauss : int\n Number of Gaussians after clustering down.\n\n '''\n def __init__(self, **kwargs):\n self.num_iters = 4\n self.num_gselect = 30\n self.num_frames = 400000\n self.num_gauss = 256\n\n self.num_iters_init = 20\n self.initial_gauss_proportion = 0.5\n self.subsample = 2\n self.cleanup = True\n self.min_gaussian_weight = 0.0001\n\n self.remove_low_count_gaussians = True\n self.num_threads = 32\n self.splice_opts = ['--left-context=3', '--right-context=3']\n\nclass iVectorExtractorConfig(object):\n '''\n Configuration class for i-vector extractor training\n\n Attributes\n ----------\n ivector_dim : int\n Dimension of the extracted i-vector\n ivector_period : int\n Number of frames between i-vector extractions\n num_iters : int\n Number of training iterations to perform\n num_gselect : int\n Gaussian-selection using diagonal model: number of Gaussians to select\n posterior_scale : float\n Scale on the acoustic posteriors, intended to account for inter-frame correlations\n min_post : float\n Minimum posterior to use (posteriors below this are pruned out)\n subsample : int\n Speeds up training; training on every x'th feature\n max_count : int\n The use of this option (e.g. --max-count 100) can make iVectors more consistent for different lengths of utterance, by scaling up the prior term when the data-count exceeds this value. 
The data-count is after posterior-scaling, so assuming the posterior-scale is 0.1, --max-count 100 starts having effect after 1000 frames, or 10 seconds of data.\n '''\n def __init__(self, **kwargs):\n self.ivector_dim = 100\n self.ivector_period = 10\n self.num_iters = 10\n self.num_gselect = 5\n self.posterior_scale = 0.1\n\n self.min_post = 0.025\n self.subsample = 2\n self.max_count = 0\n\n self.num_threads = 4\n self.num_processes = 4\n\n self.splice_opts = ['--left-context=3', '--right-context=3']\n self.compress = False\n\nclass NnetBasicConfig(object):\n '''\n Configuration class for neural network training\n\n Attributes\n ----------\n num_epochs : int\n Number of epochs of training; number of iterations is worked out from this\n iters_per_epoch : int\n Number of iterations per epoch\n realign_times : int\n How many times to realign during training; this will equally space them over the iterations\n beam : int\n Default beam width for alignment\n retry_beam : int\n Beam width to fall back on if no alignment is produced\n initial_learning_rate : float\n The initial learning rate at the beginning of training\n final_learning_rate : float\n The final learning rate by the end of training\n pnorm_input_dim : int\n The input dimension of the pnorm component\n pnorm_output_dim : int\n The output dimension of the pnorm component\n p : int\n Pnorm parameter\n hidden_layer_dim : int\n Dimension of a hidden layer\n samples_per_iter : int\n Number of samples seen per job per each iteration; used when getting examples\n shuffle_buffer_size : int\n This \"buffer_size\" variable controls randomization of the samples on each iter. You could set it to 0 or to a large value for complete randomization, but this would both consume memory and cause spikes in disk I/O. Smaller is easier on disk and memory but less random. It's not a huge deal though, as samples are anyway randomized right at the start. 
(the point of this is to get data in different minibatches on different iterations, since in the preconditioning method, 2 samples in the same minibatch can affect each others' gradients.\n add_layers_period : int\n Number of iterations between addition of a new layer\n num_hidden_layers : int\n Number of hidden layers\n randprune : float\n Speeds up LDA\n alpha : float\n Relates to preconditioning\n mix_up : int\n Number of components to mix up to\n prior_subset_size : int\n Number of samples per job for computing priors\n update_period : int\n How often the preconditioning subspace is updated\n num_samples_history : int\n Relates to online preconditioning\n preconditioning_rank_in : int\n Relates to online preconditioning\n preconditioning_rank_out : int\n Relates to online preconditioning\n\n '''\n def __init__(self, **kwargs):\n self.num_epochs = 4\n self.num_epochs_extra = 5\n self.num_iters_final = 20\n self.iters_per_epoch = 2\n self.realign_times = 0\n\n self.beam = 10\n self.retry_beam = 15000000\n\n self.initial_learning_rate=0.32\n self.final_learning_rate=0.032\n self.bias_stddev = 0.5\n\n self.pnorm_input_dim = 3000\n self.pnorm_output_dim = 300\n self.p = 2\n\n self.shrink_interval = 5\n self.shrink = True\n self.num_frames_shrink = 2000\n\n self.final_learning_rate_factor = 0.5\n self.hidden_layer_dim = 50\n\n self.samples_per_iter = 200000\n self.shuffle_buffer_size = 5000\n self.add_layers_period = 2\n self.num_hidden_layers = 3\n self.modify_learning_rates = False\n\n self.last_layer_factor = 0.1\n self.first_layer_factor = 1.0\n\n self.splice_width = 3\n self.randprune = 4.0\n self.alpha = 4.0\n self.max_change = 10.0\n self.mix_up = 12000 # From run_nnet2.sh\n self.prior_subset_size = 10000\n self.boost_silence = 0.5\n\n self.update_period = 4\n self.num_samples_history = 2000\n self.max_change_per_sample = 0.075\n self.precondition_rank_in = 20\n self.precondition_rank_out = 80\n\nclass MfccConfig(object):\n '''\n Configuration class for MFCC generation\n\n The ``config_dict`` currently stores one key ``'use-energy'`` which\n defaults to False\n\n Parameters\n ----------\n output_directory : str\n Path to directory to save configuration files for Kaldi\n kwargs : dict, optional\n If specified, updates ``config_dict`` with this dictionary\n\n Attributes\n ----------\n config_dict : dict\n Dictionary of configuration parameters\n '''\n\n def __init__(self, output_directory, job=None, kwargs=None):\n if kwargs is None:\n kwargs = {}\n self.job = job\n self.config_dict = {'use-energy': False, 'frame-shift': 10}\n self.config_dict.update(kwargs)\n self.output_directory = output_directory\n self.write()\n\n def update(self, kwargs):\n '''\n Update configuration dictionary with new dictionary\n\n Parameters\n ----------\n kwargs : dict\n Dictionary of new parameter values\n '''\n self.config_dict.update(kwargs)\n self.write()\n\n @property\n def config_directory(self):\n path = os.path.join(self.output_directory, 'config')\n os.makedirs(path, exist_ok=True)\n return path\n\n @property\n def path(self):\n if self.job is None:\n f = 'mfcc.conf'\n else:\n f = 'mfcc.{}.conf'.format(self.job)\n return os.path.join(self.config_directory, f)\n\n def write(self):\n '''\n Write configuration dictionary to a file for use in Kaldi binaries\n '''\n with open(self.path, 'w', encoding='utf8') as f:\n for k, v in self.config_dict.items():\n f.write('--{}={}\\n'.format(k, make_safe(v)))\n",
"step-ids": [
13,
24,
27,
29,
36
]
}
|
[
13,
24,
27,
29,
36
] |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# author : xiaojinsong([email protected])
parts = ['Is', 'Chicago', 'Not', 'Chicago?']
data = ['ACME', 50, 91.1]
print(' '.join(parts))
def generate_str():
print(','.join(str(d) for d in data))
def sample():
yield 'Is'
yield 'Chicago'
yield 'Not'
yield 'Chicago?'
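# combine(): lazily batch strings from `source`, yielding one joined chunk
# whenever the accumulated length exceeds `maxsize`, plus a final chunk
# for whatever is left over.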
def combine(source, maxsize):
parts = []
size = 0
for part in source:
parts.append(part)
size += len(part)
if size > maxsize:
yield ''.join(parts)
            parts = []
size = 0
yield ''.join(parts)
if __name__ == '__main__':
generate_str()
text = ','.join(sample())
print(text)
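    # chunk the sample() fragments with combine() before writing them to the file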
with open('combine.txt', 'w') as f:
for part in combine(sample(), 32768):
f.write(part)
|
normal
|
{
"blob_id": "4ce1e802831f09e503d18fd287cb35400986e3c8",
"index": 8095,
"step-1": "<mask token>\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\n<mask token>\n",
"step-3": "<mask token>\nprint(' '.join(parts))\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\ndef combine(source, maxsize):\n parts = []\n size = 0\n for part in source:\n parts.append(part)\n size += len(part)\n if size > maxsize:\n yield ''.join(parts)\n parts = []\n size = 0\n yield ''.join(parts)\n\n\nif __name__ == '__main__':\n generate_str()\n text = ','.join(sample())\n print(text)\n with open('combine.txt', 'w') as f:\n for part in combine(sample(), 32768):\n f.write(part)\n",
"step-4": "parts = ['Is', 'Chicago', 'Not', 'Chicago?']\ndata = ['ACME', 50, 91.1]\nprint(' '.join(parts))\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\ndef combine(source, maxsize):\n parts = []\n size = 0\n for part in source:\n parts.append(part)\n size += len(part)\n if size > maxsize:\n yield ''.join(parts)\n parts = []\n size = 0\n yield ''.join(parts)\n\n\nif __name__ == '__main__':\n generate_str()\n text = ','.join(sample())\n print(text)\n with open('combine.txt', 'w') as f:\n for part in combine(sample(), 32768):\n f.write(part)\n",
"step-5": "#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# auther : xiaojinsong([email protected])\n\n\nparts = ['Is', 'Chicago', 'Not', 'Chicago?']\ndata = ['ACME', 50, 91.1]\nprint(' '.join(parts))\n\n\ndef generate_str():\n print(','.join(str(d) for d in data))\n\n\ndef sample():\n yield 'Is'\n yield 'Chicago'\n yield 'Not'\n yield 'Chicago?'\n\n\ndef combine(source, maxsize):\n parts = []\n size = 0\n for part in source:\n parts.append(part)\n size += len(part)\n if size > maxsize:\n yield ''.join(parts)\n parts=[]\n size = 0\n yield ''.join(parts)\n\n\nif __name__ == '__main__':\n generate_str()\n text = ','.join(sample())\n print(text)\n with open('combine.txt', 'w') as f:\n for part in combine(sample(), 32768):\n f.write(part)",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
<|reserved_special_token_0|>
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
<|reserved_special_token_0|>
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
def main():
decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'
)
if decision == str(1):
execfile('RSAencr.py')
elif decision == str(2):
execfile('RSAdecr.py')
else:
exit(4)
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
fileDir = os.path.dirname(os.path.realpath('__file__'))
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
def main():
decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'
)
if decision == str(1):
execfile('RSAencr.py')
elif decision == str(2):
execfile('RSAdecr.py')
else:
exit(4)
main()
<|reserved_special_token_1|>
from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
fileDir = os.path.dirname(os.path.realpath('__file__'))
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw()
filename = askopenfilename()
return filename
def hash_sha512(message):
h = SHA512.new()
h.update(str(message))
signature = h.hexdigest()
return signature
def main():
decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'
)
if decision == str(1):
execfile('RSAencr.py')
elif decision == str(2):
execfile('RSAdecr.py')
else:
exit(4)
main()
<|reserved_special_token_1|>
from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
from Crypto.Hash import SHA512
from Crypto.PublicKey import RSA
from Crypto import Random
from collections import Counter
from Tkinter import Tk
from tkFileDialog import askopenfilename
import ast
import os
import tkMessageBox
from Tkinter import Tk
from tkFileDialog import askopenfilename
import Tkinter
import tkSimpleDialog
import tkMessageBox
fileDir = os.path.dirname(os.path.realpath('__file__'))
def ask_user(prompt, command):
root = Tkinter.Tk()
var = tkSimpleDialog.askstring(str(prompt), str(command))
#print var
return var
def read_file_line(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_key_file(key_name):
filename = os.path.join(fileDir, str(key_name))
with open(filename, 'r') as f:
read_data = f.readline()
return read_data
def read_file_all(file_name):
filename = os.path.join(fileDir, str(file_name))
with open(filename, 'r') as f:
read_data = f.readlines()
return read_data
def pop_window(title, message):
tkMessageBox.showinfo(title, message)
def select_file():
Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
return filename
def hash_sha512(message):
# SHA512 HASHING OF THE INPUT FILE
h = SHA512.new()
h.update(str(message))
# digest() Return the binary (non-printable) digest of the message that has been hashed so far.
# hexdigest() Return the printable digest of the message that has been hashed so far.
signature = h.hexdigest()
return signature
def main():
decision = ask_user("DECIDE", "RSA: type 1 to add file or type 2 to verify")
if decision == str(1):
execfile("RSAencr.py")
elif decision == str(2):
execfile("RSAdecr.py")
else:
exit(4)
main()
|
flexible
|
{
"blob_id": "da696961fea72e1482beae73c19b042b94d93886",
"index": 1660,
"step-1": "<mask token>\n\n\ndef read_file_all(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readlines()\n return read_data\n\n\n<mask token>\n\n\ndef select_file():\n Tk().withdraw()\n filename = askopenfilename()\n return filename\n\n\ndef hash_sha512(message):\n h = SHA512.new()\n h.update(str(message))\n signature = h.hexdigest()\n return signature\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef ask_user(prompt, command):\n root = Tkinter.Tk()\n var = tkSimpleDialog.askstring(str(prompt), str(command))\n return var\n\n\ndef read_file_line(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_key_file(key_name):\n filename = os.path.join(fileDir, str(key_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_file_all(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readlines()\n return read_data\n\n\ndef pop_window(title, message):\n tkMessageBox.showinfo(title, message)\n\n\ndef select_file():\n Tk().withdraw()\n filename = askopenfilename()\n return filename\n\n\ndef hash_sha512(message):\n h = SHA512.new()\n h.update(str(message))\n signature = h.hexdigest()\n return signature\n\n\ndef main():\n decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'\n )\n if decision == str(1):\n execfile('RSAencr.py')\n elif decision == str(2):\n execfile('RSAdecr.py')\n else:\n exit(4)\n\n\nmain()\n",
"step-3": "<mask token>\nfileDir = os.path.dirname(os.path.realpath('__file__'))\n\n\ndef ask_user(prompt, command):\n root = Tkinter.Tk()\n var = tkSimpleDialog.askstring(str(prompt), str(command))\n return var\n\n\ndef read_file_line(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_key_file(key_name):\n filename = os.path.join(fileDir, str(key_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_file_all(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readlines()\n return read_data\n\n\ndef pop_window(title, message):\n tkMessageBox.showinfo(title, message)\n\n\ndef select_file():\n Tk().withdraw()\n filename = askopenfilename()\n return filename\n\n\ndef hash_sha512(message):\n h = SHA512.new()\n h.update(str(message))\n signature = h.hexdigest()\n return signature\n\n\ndef main():\n decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'\n )\n if decision == str(1):\n execfile('RSAencr.py')\n elif decision == str(2):\n execfile('RSAdecr.py')\n else:\n exit(4)\n\n\nmain()\n",
"step-4": "from Crypto.Hash import SHA512\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\nfrom collections import Counter\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport ast\nimport os\nimport tkMessageBox\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport Tkinter\nimport tkSimpleDialog\nimport tkMessageBox\nfrom Crypto.Hash import SHA512\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\nfrom collections import Counter\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport ast\nimport os\nimport tkMessageBox\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport Tkinter\nimport tkSimpleDialog\nimport tkMessageBox\nfileDir = os.path.dirname(os.path.realpath('__file__'))\n\n\ndef ask_user(prompt, command):\n root = Tkinter.Tk()\n var = tkSimpleDialog.askstring(str(prompt), str(command))\n return var\n\n\ndef read_file_line(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_key_file(key_name):\n filename = os.path.join(fileDir, str(key_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_file_all(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readlines()\n return read_data\n\n\ndef pop_window(title, message):\n tkMessageBox.showinfo(title, message)\n\n\ndef select_file():\n Tk().withdraw()\n filename = askopenfilename()\n return filename\n\n\ndef hash_sha512(message):\n h = SHA512.new()\n h.update(str(message))\n signature = h.hexdigest()\n return signature\n\n\ndef main():\n decision = ask_user('DECIDE', 'RSA: type 1 to add file or type 2 to verify'\n )\n if decision == str(1):\n execfile('RSAencr.py')\n elif decision == str(2):\n execfile('RSAdecr.py')\n else:\n exit(4)\n\n\nmain()\n",
"step-5": "from Crypto.Hash import SHA512\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\nfrom collections import Counter\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport ast\nimport os\nimport tkMessageBox\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport Tkinter\nimport tkSimpleDialog\nimport tkMessageBox\nfrom Crypto.Hash import SHA512\nfrom Crypto.PublicKey import RSA\nfrom Crypto import Random\nfrom collections import Counter\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport ast\nimport os\nimport tkMessageBox\nfrom Tkinter import Tk\nfrom tkFileDialog import askopenfilename\nimport Tkinter\nimport tkSimpleDialog\nimport tkMessageBox\n\n\nfileDir = os.path.dirname(os.path.realpath('__file__'))\n\ndef ask_user(prompt, command):\n root = Tkinter.Tk()\n var = tkSimpleDialog.askstring(str(prompt), str(command))\n #print var\n return var\n\ndef read_file_line(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_key_file(key_name):\n filename = os.path.join(fileDir, str(key_name))\n with open(filename, 'r') as f:\n read_data = f.readline()\n return read_data\n\n\ndef read_file_all(file_name):\n filename = os.path.join(fileDir, str(file_name))\n with open(filename, 'r') as f:\n read_data = f.readlines()\n return read_data\n\n\ndef pop_window(title, message):\n tkMessageBox.showinfo(title, message)\n\n\ndef select_file():\n Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing\n filename = askopenfilename() # show an \"Open\" dialog box and return the path to the selected file\n return filename\n\n\ndef hash_sha512(message):\n # SHA512 HASHING OF THE INPUT FILE\n h = SHA512.new()\n h.update(str(message))\n # digest() Return the binary (non-printable) digest of the message that has been hashed so far.\n # hexdigest() Return the printable digest of the message that has been hashed so far.\n signature = h.hexdigest()\n return signature\n\n\ndef main():\n decision = ask_user(\"DECIDE\", \"RSA: type 1 to add file or type 2 to verify\")\n\n if decision == str(1):\n execfile(\"RSAencr.py\")\n elif decision == str(2):\n execfile(\"RSAdecr.py\")\n else:\n exit(4)\n\nmain()",
"step-ids": [
3,
9,
10,
11,
12
]
}
|
[
3,
9,
10,
11,
12
] |
# -*- coding: utf-8 -*-
"""
/***************************************************************************
TileMapScalePlugin
A QGIS plugin
Let you add tiled datasets (GDAL WMS) and shows them in the correct scale.
-------------------
begin : 2014-03-03
copyright : (C) 2014 by Matthias Ludwig - Datalyze Solutions
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
This script initializes the plugin, making it known to QGIS.
"""
def classFactory(iface):
# load TileMapScalePlugin class from file TileMapScalePlugin
from .tilemapscaleplugin import TileMapScalePlugin
return TileMapScalePlugin(iface)
|
normal
|
{
"blob_id": "f2e2ebd5b848cf3a01b7304e5e194beb3eec1c10",
"index": 1214,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef classFactory(iface):\n from .tilemapscaleplugin import TileMapScalePlugin\n return TileMapScalePlugin(iface)\n",
"step-3": "# -*- coding: utf-8 -*-\n\"\"\"\n/***************************************************************************\n TileMapScalePlugin\n A QGIS plugin\n Let you add tiled datasets (GDAL WMS) and shows them in the correct scale.\n -------------------\n begin : 2014-03-03\n copyright : (C) 2014 by Matthias Ludwig - Datalyze Solutions\n email : [email protected]\n ***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n This script initializes the plugin, making it known to QGIS.\n\"\"\"\n\ndef classFactory(iface):\n # load TileMapScalePlugin class from file TileMapScalePlugin\n from .tilemapscaleplugin import TileMapScalePlugin\n return TileMapScalePlugin(iface)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import math
#variables for current GPS Lat / Lon Readings
currentLat = 41.391240
currentLon = -73.956217
destLat = 41.393035
destLon = -73.953398
#variables for current UTM coordinates
currentX = 587262
currentY = 4582716
destX = 587499
destY = 4582919
#declination angle based on geographic location
#see #https://www.ngdc.noaa.gov/geomag-web/
#needed for "grid-to-magnetic" angle
declinationAngle = 13
########### Functions ############################################################################
def haversine(currentLat,currentLon, destLat, destLon):
#Calculate the great circle distance between two points
#on the earth (specified in decimal degrees - Lat/Lon coords) using Haversine Formula
haversineDistance = math.acos( math.sin(currentLat*math.pi/180)*math.sin(destLat*math.pi/180) + math.cos(currentLat*math.pi/180)*math.cos(destLat*math.pi/180)*math.cos(destLon*math.pi/180-currentLon*math.pi/180) ) * 6371000
    #convert lat/lon to radians inside the trig calls, matching the distance calculation above
    haversineAngle = ( math.atan2(math.cos(currentLat*math.pi/180)*math.sin(destLat*math.pi/180)-math.sin(currentLat*math.pi/180)*math.cos(destLat*math.pi/180)*math.cos((destLon-currentLon)*math.pi/180), math.sin((destLon-currentLon)*math.pi/180)*math.cos(destLat*math.pi/180)) ) * (180/math.pi)
#transform angle perspective - Haversine calculates angle with the perspective that 90 degrees points North
#for magnetic field reference, we need North to correspond to 0 degrees, so subtract 90
magBearing = haversineAngle - 90
#account for declination angle (Westerly declination, so add offset)
magBearing = magBearing + declinationAngle
#account for angle wrap
if magBearing < 0:
magBearing = magBearing + 360
elif magBearing > 360:
magBearing = magBearing - 360
return haversineDistance, magBearing
def distAndBearing_utm(currentX, currentY, destX, destY):
#calculate distance & bearing using UTM coordinates (x,y)-type coordinates
dx = destX - currentX
dy = destY - currentY
#calculate distance between the two points
utm_dist = math.sqrt( (dx)**2 + (dy)**2 )
#calculate the angle between the points
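    #note: atan(dy/dx) assumes dx != 0; a destination due north/south of the
    #current point (dx == 0) would raise ZeroDivisionError here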
utm_angle = math.atan(dy/float(dx)) * (180/math.pi)
#If we treat the current (X,Y) point as the origin, then destination (X,Y) lies in a quadrant (either I,II,III, or IV), because ->
#the dx and dy (above) results in a + or - difference, which indicates the destination quadrant.
#The quadrant will determine the type of angle adjustment needed magnetically (based on N,S,E, and W heading)
if dx > 0 and dy > 0: #then destination is in quadrant I (between N and E); atan angle is positive
utm_angleTF = 90 - utm_angle
elif dx < 0 and dy > 0: #then destination is in quadrant II (between N and W)
#atan angle calculation is negative; (get rid of neg. sign, then add to 270 deg-West)
utm_angleTF = 270 + (-1 * utm_angle)
elif dx < 0 and dy < 0: #then destination is in quadrant III (between (W and S); atan angle is positive
utm_angleTF = 270 - utm_angle
else: # dx > 0 and dy <0, then quad IV (between S and E)
#angle calculation is negative; (get rid of neg. sign, then add to 90 deg-East)
utm_angleTF = 90 + (-1 * utm_angle)
#account for declination angle (Westerly declination angle, so add offset)
magUtmBearing = utm_angleTF + declinationAngle #add offset due to Westerly declination
#account for angle wrap
if magUtmBearing < 0:
magUtmBearing = magUtmBearing + 360
elif magUtmBearing > 360:
magUtmBearing = magUtmBearing - 360
return utm_dist, magUtmBearing
####### MAIN ########################################################
dist, bearing = haversine(currentLat,currentLon, destLat, destLon)
print("Distance & Bearing based on Lat/Lon is: ", dist, bearing)
utm_dist, utm_angle = distAndBearing_utm(currentX, currentY, destX, destY)
print("Distance & Bearing based on UTM is: ", utm_dist, utm_angle)
|
normal
|
{
"blob_id": "180d28ac77b6ff4488b3fd9c17a9ee4571e33631",
"index": 2694,
"step-1": "import math\n\n#variables for current GPS Lat / Lon Readings\ncurrentLat = 41.391240\ncurrentLon = -73.956217\ndestLat = 41.393035\ndestLon = -73.953398\n\n#variables for current UTM coordinates\ncurrentX = 587262\ncurrentY = 4582716\ndestX = 587499\ndestY = 4582919\n\n#declination angle based on geographic location\n#see #https://www.ngdc.noaa.gov/geomag-web/\n#needed for \"grid-to-magnetic\" angle\ndeclinationAngle = 13\n\n\n########### Functions ############################################################################\ndef haversine(currentLat,currentLon, destLat, destLon):\n #Calculate the great circle distance between two points \n #on the earth (specified in decimal degrees - Lat/Lon coords) using Haversine Formula\n \n haversineDistance = math.acos( math.sin(currentLat*math.pi/180)*math.sin(destLat*math.pi/180) + math.cos(currentLat*math.pi/180)*math.cos(destLat*math.pi/180)*math.cos(destLon*math.pi/180-currentLon*math.pi/180) ) * 6371000\n \n haversineAngle = ( math.atan2(math.cos(currentLat)*math.sin(destLat)-math.sin(currentLat)*math.cos(destLat)*math.cos(destLon-currentLon), math.sin(destLon-currentLon)*math.cos(destLat)) ) * (180/math.pi) \n \n #transform angle perspective - Haversine calculates angle with the perspective that 90 degrees points North\n #for magnetic field reference, we need North to correspond to 0 degrees, so subtract 90\n magBearing = haversineAngle - 90\n #account for declination angle (Westerly declination, so add offset)\n magBearing = magBearing + declinationAngle \n #account for angle wrap\n if magBearing < 0:\n magBearing = magBearing + 360 \n elif magBearing > 360:\n magBearing = magBearing - 360\n return haversineDistance, magBearing \n \ndef distAndBearing_utm(currentX, currentY, destX, destY):\n #calculate distance & bearing using UTM coordinates (x,y)-type coordinates\n dx = destX - currentX\n dy = destY - currentY\n #calculate distance between the two points\n utm_dist = math.sqrt( (dx)**2 + (dy)**2 )\n #calculate the angle between the points\n utm_angle = math.atan(dy/float(dx)) * (180/math.pi)\n \n #If we treat the current (X,Y) point as the origin, then destination (X,Y) lies in a quadrant (either I,II,III, or IV), because ->\n #the dx and dy (above) results in a + or - difference, which indicates the destination quadrant.\n #The quadrant will determine the type of angle adjustment needed magnetically (based on N,S,E, and W heading)\n if dx > 0 and dy > 0: #then destination is in quadrant I (between N and E); atan angle is positive\n utm_angleTF = 90 - utm_angle\n elif dx < 0 and dy > 0: #then destination is in quadrant II (between N and W)\n #atan angle calculation is negative; (get rid of neg. sign, then add to 270 deg-West)\n utm_angleTF = 270 + (-1 * utm_angle)\n elif dx < 0 and dy < 0: #then destination is in quadrant III (between (W and S); atan angle is positive\n utm_angleTF = 270 - utm_angle\n else: # dx > 0 and dy <0, then quad IV (between S and E)\n #angle calculation is negative; (get rid of neg. 
sign, then add to 90 deg-East)\n utm_angleTF = 90 + (-1 * utm_angle)\n \n #account for declination angle (Westerly declination angle, so add offset)\n magUtmBearing = utm_angleTF + declinationAngle #add offset due to Westerly declination \n #account for angle wrap\n if magUtmBearing < 0:\n magUtmBearing = magUtmBearing + 360 \n elif magUtmBearing > 360:\n magUtmBearing = magUtmBearing - 360\n \n return utm_dist, magUtmBearing \n\n\n####### MAIN ########################################################\ndist, bearing = haversine(currentLat,currentLon, destLat, destLon)\nprint \"Distance & Bearning based on Lat/Lon is: \", dist, bearing\nutm_dist, utm_angle = distAndBearing_utm(currentX, currentY, destX, destY)\nprint \"Distance & Bearning based on UTM is: \", utm_dist, utm_angle\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.urls import reverse_lazy
from django.views.generic import CreateView, edit, ListView
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from users.forms import CustomUserCreationForm, LoginForm
from users.models import CustomUser as Users
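# Sign-up page: a CreateView backed by the custom user creation form;
# on success it redirects to the login page.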
class SignUpView(CreateView):
form_class = CustomUserCreationForm
success_url = reverse_lazy('login')
template_name = 'users/signup.html'
class IndexView(edit.FormView):
success_url = '/facilities'
form_class = LoginForm
template_name = 'users/index.html'
def form_valid(self, form):
username = form.cleaned_data['username']
password = form.cleaned_data['password']
user = authenticate(self.request, username=username, password=password)
if user is not None:
login(self.request, user)
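        # note: the redirect to success_url happens even when authentication
        # fails; only the login() call above is conditional on a valid user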
return super().form_valid(form)
|
normal
|
{
"blob_id": "6bd9c8e38373e696193c146b88ebf6601170cf0e",
"index": 9549,
"step-1": "<mask token>\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n",
"step-2": "<mask token>\n\n\nclass SignUpView(CreateView):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n",
"step-3": "<mask token>\n\n\nclass SignUpView(CreateView):\n form_class = CustomUserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'users/signup.html'\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n",
"step-4": "from django.urls import reverse_lazy\nfrom django.views.generic import CreateView, edit, ListView\nfrom django.shortcuts import render\nfrom django.contrib.auth import authenticate, login\nfrom users.forms import CustomUserCreationForm, LoginForm\nfrom users.models import CustomUser as Users\n\n\nclass SignUpView(CreateView):\n form_class = CustomUserCreationForm\n success_url = reverse_lazy('login')\n template_name = 'users/signup.html'\n\n\nclass IndexView(edit.FormView):\n success_url = '/facilities'\n form_class = LoginForm\n template_name = 'users/index.html'\n\n def form_valid(self, form):\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(self.request, username=username, password=password)\n if user is not None:\n login(self.request, user)\n return super().form_valid(form)\n",
"step-5": null,
"step-ids": [
3,
4,
5,
6
]
}
|
[
3,
4,
5,
6
] |
version https://git-lfs.github.com/spec/v1
oid sha256:26be7fc8be181fad8e821179cce6be14e37a5f303e532e6fb00f848d5f33fe41
size 752
|
normal
|
{
"blob_id": "0f37baf3b08ecf7bd8db43ecc2f29c3ca6e00af0",
"index": 3089,
"step-1": "version https://git-lfs.github.com/spec/v1\noid sha256:26be7fc8be181fad8e821179cce6be14e37a5f303e532e6fb00f848d5f33fe41\nsize 752\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
'''
Author: Jazielinho
'''
import keyboard
from PIL import ImageGrab
import os
import tqdm
import random
from training import config_tr
class DataSet(object):
    ''' class that builds the training dataset '''
saltar = 'saltar'
nada = 'nada'
reglas = [saltar, nada]
formato = 'PNG'
train = 'train'
val = 'val'
def __init__(self, val_split: int = 0.2) -> None:
self.imagenes = []
self.targets = []
self.nombre_maximo = 0
nombres_maximos = []
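        # scan the existing train/val class folders so newly captured images
        # continue numbering from the highest index already saved on disk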
for regla in DataSet.reglas:
if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla):
os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla)
if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla):
os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)
lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla) + \
os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)
if len(lista_imagenes) == 0:
nombre_maximo = [0]
else:
maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for x in lista_imagenes]
nombre_maximo = maximo_nombre
nombres_maximos = nombres_maximos + nombre_maximo
self.nombre_maximo = max(nombres_maximos)
self.val_split = val_split
def genera_datos(self) -> None:
imagenes = []
targets = []
        # Start collecting only after the space bar is pressed for the first time
while True:
if keyboard.is_pressed('space'):
break
while True:
            # The images are in black and white
imagen = ImageGrab.grab()
imagenes.append(imagen)
if keyboard.is_pressed('escape'):
break
if keyboard.is_pressed('space') or keyboard.is_pressed('up'):
targets.append(DataSet.saltar)
else:
targets.append(DataSet.nada)
self.imagenes = imagenes
self.targets = targets
self.guardar_info()
def guardar_info(self) -> None:
        ''' save the images '''
for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets), total=len(self.imagenes)):
self.nombre_maximo += 1
random_ = random.random()
if random_ <= 1 - self.val_split:
image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato
else:
image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato
imagen.save(image_PATH, DataSet.formato)
if __name__ == '__main__':
self = DataSet()
self.genera_datos()
|
normal
|
{
"blob_id": "c931d1ac5c2d003a8eaac3c6d777ce408df57117",
"index": 8534,
"step-1": "<mask token>\n\n\nclass DataSet(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DataSet(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, val_split: int=0.2) ->None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.\n train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + regla)\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' +\n regla)\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' +\n DataSet.train + '/' + regla) + os.listdir(config_tr.\n PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for\n x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass DataSet(object):\n \"\"\" clase que crea dataset de entrenamiento \"\"\"\n saltar = 'saltar'\n nada = 'nada'\n reglas = [saltar, nada]\n formato = 'PNG'\n train = 'train'\n val = 'val'\n\n def __init__(self, val_split: int=0.2) ->None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.\n train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + regla)\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' +\n regla)\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' +\n DataSet.train + '/' + regla) + os.listdir(config_tr.\n PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for\n x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport keyboard\nfrom PIL import ImageGrab\nimport os\nimport tqdm\nimport random\nfrom training import config_tr\n\n\nclass DataSet(object):\n \"\"\" clase que crea dataset de entrenamiento \"\"\"\n saltar = 'saltar'\n nada = 'nada'\n reglas = [saltar, nada]\n formato = 'PNG'\n train = 'train'\n val = 'val'\n\n def __init__(self, val_split: int=0.2) ->None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.\n train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + regla)\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' +\n regla)\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' +\n DataSet.train + '/' + regla) + os.listdir(config_tr.\n PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for\n x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) ->None:\n imagenes = []\n targets = []\n while True:\n if keyboard.is_pressed('space'):\n break\n while True:\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n if keyboard.is_pressed('escape'):\n break\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n self.imagenes = imagenes\n self.targets = targets\n self.guardar_info()\n\n def guardar_info(self) ->None:\n \"\"\" guardamos las imagenes \"\"\"\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets),\n total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.train +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n else:\n image_PATH = (config_tr.PATH_IMAGES + '/' + DataSet.val +\n '/' + target + '/' + str(self.nombre_maximo) + '.' +\n DataSet.formato)\n imagen.save(image_PATH, DataSet.formato)\n\n\nif __name__ == '__main__':\n self = DataSet()\n self.genera_datos()\n",
"step-5": "'''\nAutor: Jazielinho\n'''\n\nimport keyboard\nfrom PIL import ImageGrab\nimport os\nimport tqdm\nimport random\n\nfrom training import config_tr\n\n\nclass DataSet(object):\n ''' clase que crea dataset de entrenamiento '''\n\n saltar = 'saltar'\n nada = 'nada'\n reglas = [saltar, nada]\n formato = 'PNG'\n train = 'train'\n val = 'val'\n\n def __init__(self, val_split: int = 0.2) -> None:\n self.imagenes = []\n self.targets = []\n self.nombre_maximo = 0\n\n nombres_maximos = []\n for regla in DataSet.reglas:\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla)\n\n if not os.path.exists(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla):\n os.makedirs(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n\n lista_imagenes = os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + regla) + \\\n os.listdir(config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + regla)\n if len(lista_imagenes) == 0:\n nombre_maximo = [0]\n else:\n maximo_nombre = [int(x.split('.' + DataSet.formato)[0]) for x in lista_imagenes]\n nombre_maximo = maximo_nombre\n nombres_maximos = nombres_maximos + nombre_maximo\n\n self.nombre_maximo = max(nombres_maximos)\n self.val_split = val_split\n\n def genera_datos(self) -> None:\n imagenes = []\n targets = []\n\n # Empieza a funcionar desde presionar espacio\n while True:\n if keyboard.is_pressed('space'):\n break\n\n while True:\n # Las imagenes estan en blanco y negro\n imagen = ImageGrab.grab()\n imagenes.append(imagen)\n\n if keyboard.is_pressed('escape'):\n break\n\n if keyboard.is_pressed('space') or keyboard.is_pressed('up'):\n targets.append(DataSet.saltar)\n else:\n targets.append(DataSet.nada)\n\n self.imagenes = imagenes\n self.targets = targets\n\n self.guardar_info()\n\n def guardar_info(self) -> None:\n ''' guardamos las imagenes '''\n for imagen, target in tqdm.tqdm(zip(self.imagenes, self.targets), total=len(self.imagenes)):\n self.nombre_maximo += 1\n random_ = random.random()\n if random_ <= 1 - self.val_split:\n image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.train + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato\n else:\n image_PATH = config_tr.PATH_IMAGES + '/' + DataSet.val + '/' + target + '/' + str(self.nombre_maximo) + '.' + DataSet.formato\n imagen.save(image_PATH, DataSet.formato)\n\n\nif __name__ == '__main__':\n self = DataSet()\n\n self.genera_datos()\n",
"step-ids": [
3,
4,
6,
8,
9
]
}
|
[
3,
4,
6,
8,
9
] |
from collections import deque
class Queue:
def __init__(self):
self.container = deque()
def enqueue(self, data):
self.container.appendleft(data)
def dequeue(self):
return self.container.pop()
def is_empty(self):
return len(self.container) == 0
def size(self):
return len(self.container)
def front(self):
if not self.is_empty():
return self.container[-1]
def binary_numbers(n):
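    # queue-based generation: starting from "1", each number taken from the
    # front enqueues its two children "<front>0" and "<front>1"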
queue = Queue()
queue.enqueue("1")
for i in range(n):
front = queue.front()
print(" ", front)
queue.enqueue(front + "0")
queue.enqueue(front + "1")
queue.dequeue()
if __name__ == '__main__':
binary_numbers(20)
|
normal
|
{
"blob_id": "2898506b9fd5b112f93a1ff6b010848244c398bd",
"index": 7197,
"step-1": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n <mask token>\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue('1')\n for i in range(n):\n front = queue.front()\n print(' ', front)\n queue.enqueue(front + '0')\n queue.enqueue(front + '1')\n queue.dequeue()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Queue:\n\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue('1')\n for i in range(n):\n front = queue.front()\n print(' ', front)\n queue.enqueue(front + '0')\n queue.enqueue(front + '1')\n queue.dequeue()\n\n\nif __name__ == '__main__':\n binary_numbers(20)\n",
"step-5": "from collections import deque\n\nclass Queue:\n def __init__(self):\n self.container = deque()\n\n def enqueue(self, data):\n self.container.appendleft(data)\n\n def dequeue(self):\n return self.container.pop()\n\n def is_empty(self):\n return len(self.container) == 0\n\n def size(self):\n return len(self.container)\n\n def front(self):\n if not self.is_empty():\n return self.container[-1]\n\n\ndef binary_numbers(n):\n queue = Queue()\n queue.enqueue(\"1\")\n\n for i in range(n):\n front = queue.front()\n print(\" \", front)\n queue.enqueue(front + \"0\")\n queue.enqueue(front + \"1\")\n\n queue.dequeue()\n\n\nif __name__ == '__main__':\n binary_numbers(20)",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v1/proto/services/user_interest_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v1/proto/services/user_interest_service.proto',
package='google.ads.googleads.v1.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v1.servicesB\030UserInterestServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V1.Services\312\002 Google\\Ads\\GoogleAds\\V1\\Services\352\002$Google::Ads::GoogleAds::V1::Services'),
serialized_pb=_b('\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\x12 google.ads.googleads.v1.services\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\x1a\x1cgoogle/api/annotations.proto\"/\n\x16GetUserInterestRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xcd\x01\n\x13UserInterestService\x12\xb5\x01\n\x0fGetUserInterest\x12\x38.google.ads.googleads.v1.services.GetUserInterestRequest\x1a/.google.ads.googleads.v1.resources.UserInterest\"7\x82\xd3\xe4\x93\x02\x31\x12//v1/{resource_name=customers/*/userInterests/*}B\xff\x01\n$com.google.ads.googleads.v1.servicesB\x18UserInterestServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V1.Services\xca\x02 Google\\Ads\\GoogleAds\\V1\\Services\xea\x02$Google::Ads::GoogleAds::V1::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GETUSERINTERESTREQUEST = _descriptor.Descriptor(
name='GetUserInterestRequest',
full_name='google.ads.googleads.v1.services.GetUserInterestRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v1.services.GetUserInterestRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=195,
serialized_end=242,
)
DESCRIPTOR.message_types_by_name['GetUserInterestRequest'] = _GETUSERINTERESTREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetUserInterestRequest = _reflection.GeneratedProtocolMessageType('GetUserInterestRequest', (_message.Message,), dict(
DESCRIPTOR = _GETUSERINTERESTREQUEST,
__module__ = 'google.ads.googleads_v1.proto.services.user_interest_service_pb2'
,
__doc__ = """Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].
Attributes:
resource_name:
Resource name of the UserInterest to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetUserInterestRequest)
))
_sym_db.RegisterMessage(GetUserInterestRequest)
DESCRIPTOR._options = None
_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(
name='UserInterestService',
full_name='google.ads.googleads.v1.services.UserInterestService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=245,
serialized_end=450,
methods=[
_descriptor.MethodDescriptor(
name='GetUserInterest',
full_name='google.ads.googleads.v1.services.UserInterestService.GetUserInterest',
index=0,
containing_service=None,
input_type=_GETUSERINTERESTREQUEST,
output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2._USERINTEREST,
serialized_options=_b('\202\323\344\223\0021\022//v1/{resource_name=customers/*/userInterests/*}'),
),
])
_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)
DESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE
# @@protoc_insertion_point(module_scope)
|
normal
|
{
"blob_id": "654586443e96f84aae70b3ce3263b0458a27334b",
"index": 473,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n<mask token>\n_sym_db.RegisterMessage(GetUserInterestRequest)\n<mask token>\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\n<mask token>\n",
"step-3": "<mask token>\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\n<mask token>\n_sym_db = _symbol_database.Default()\n<mask token>\nDESCRIPTOR = _descriptor.FileDescriptor(name=\n 'google/ads/googleads_v1/proto/services/user_interest_service.proto',\n package='google.ads.googleads.v1.services', syntax='proto3',\n serialized_options=_b(\n \"\"\"\n$com.google.ads.googleads.v1.servicesB\u0018UserInterestServiceProtoP\u0001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\u0002\u0003GAAª\u0002 Google.Ads.GoogleAds.V1.ServicesÊ\u0002 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\u0002$Google::Ads::GoogleAds::V1::Services\"\"\"\n ), serialized_pb=_b(\n '\\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\\x12 google.ads.googleads.v1.services\\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\\x1a\\x1cgoogle/api/annotations.proto\"/\\n\\x16GetUserInterestRequest\\x12\\x15\\n\\rresource_name\\x18\\x01 \\x01(\\t2Í\\x01\\n\\x13UserInterestService\\x12µ\\x01\\n\\x0fGetUserInterest\\x128.google.ads.googleads.v1.services.GetUserInterestRequest\\x1a/.google.ads.googleads.v1.resources.UserInterest\"7\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}Bÿ\\x01\\n$com.google.ads.googleads.v1.servicesB\\x18UserInterestServiceProtoP\\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\\x02\\x03GAAª\\x02 Google.Ads.GoogleAds.V1.ServicesÊ\\x02 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\\x02$Google::Ads::GoogleAds::V1::Servicesb\\x06proto3'\n ), dependencies=[\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n .DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR])\n_GETUSERINTERESTREQUEST = _descriptor.Descriptor(name=\n 'GetUserInterestRequest', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest', filename=\n None, file=DESCRIPTOR, containing_type=None, fields=[_descriptor.\n FieldDescriptor(name='resource_name', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest.resource_name',\n index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False,\n default_value=_b('').decode('utf-8'), message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR)], extensions=[], nested_types\n =[], enum_types=[], serialized_options=None, is_extendable=False,\n syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=195,\n serialized_end=242)\nDESCRIPTOR.message_types_by_name['GetUserInterestRequest'\n ] = _GETUSERINTERESTREQUEST\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nGetUserInterestRequest = _reflection.GeneratedProtocolMessageType(\n 'GetUserInterestRequest', (_message.Message,), dict(DESCRIPTOR=\n _GETUSERINTERESTREQUEST, __module__=\n 'google.ads.googleads_v1.proto.services.user_interest_service_pb2',\n __doc__=\n \"\"\"Request message for\n [UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].\n \n \n Attributes:\n resource_name:\n Resource name of the UserInterest to fetch.\n \"\"\"\n ))\n_sym_db.RegisterMessage(GetUserInterestRequest)\nDESCRIPTOR._options = None\n_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(name=\n 'UserInterestService', full_name=\n 'google.ads.googleads.v1.services.UserInterestService', file=DESCRIPTOR,\n index=0, serialized_options=None, serialized_start=245, serialized_end=\n 450, 
methods=[_descriptor.MethodDescriptor(name='GetUserInterest',\n full_name=\n 'google.ads.googleads.v1.services.UserInterestService.GetUserInterest',\n index=0, containing_service=None, input_type=_GETUSERINTERESTREQUEST,\n output_type=\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n ._USERINTEREST, serialized_options=_b(\n '\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}'))])\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\nDESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE\n",
"step-4": "import sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')\n )\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n_sym_db = _symbol_database.Default()\nfrom google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\nfrom google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2\nDESCRIPTOR = _descriptor.FileDescriptor(name=\n 'google/ads/googleads_v1/proto/services/user_interest_service.proto',\n package='google.ads.googleads.v1.services', syntax='proto3',\n serialized_options=_b(\n \"\"\"\n$com.google.ads.googleads.v1.servicesB\u0018UserInterestServiceProtoP\u0001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\u0002\u0003GAAª\u0002 Google.Ads.GoogleAds.V1.ServicesÊ\u0002 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\u0002$Google::Ads::GoogleAds::V1::Services\"\"\"\n ), serialized_pb=_b(\n '\\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\\x12 google.ads.googleads.v1.services\\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\\x1a\\x1cgoogle/api/annotations.proto\"/\\n\\x16GetUserInterestRequest\\x12\\x15\\n\\rresource_name\\x18\\x01 \\x01(\\t2Í\\x01\\n\\x13UserInterestService\\x12µ\\x01\\n\\x0fGetUserInterest\\x128.google.ads.googleads.v1.services.GetUserInterestRequest\\x1a/.google.ads.googleads.v1.resources.UserInterest\"7\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}Bÿ\\x01\\n$com.google.ads.googleads.v1.servicesB\\x18UserInterestServiceProtoP\\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services¢\\x02\\x03GAAª\\x02 Google.Ads.GoogleAds.V1.ServicesÊ\\x02 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Servicesê\\x02$Google::Ads::GoogleAds::V1::Servicesb\\x06proto3'\n ), dependencies=[\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n .DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR])\n_GETUSERINTERESTREQUEST = _descriptor.Descriptor(name=\n 'GetUserInterestRequest', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest', filename=\n None, file=DESCRIPTOR, containing_type=None, fields=[_descriptor.\n FieldDescriptor(name='resource_name', full_name=\n 'google.ads.googleads.v1.services.GetUserInterestRequest.resource_name',\n index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False,\n default_value=_b('').decode('utf-8'), message_type=None, enum_type=None,\n containing_type=None, is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR)], extensions=[], nested_types\n =[], enum_types=[], serialized_options=None, is_extendable=False,\n syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=195,\n serialized_end=242)\nDESCRIPTOR.message_types_by_name['GetUserInterestRequest'\n ] = _GETUSERINTERESTREQUEST\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nGetUserInterestRequest = _reflection.GeneratedProtocolMessageType(\n 'GetUserInterestRequest', (_message.Message,), dict(DESCRIPTOR=\n _GETUSERINTERESTREQUEST, __module__=\n 'google.ads.googleads_v1.proto.services.user_interest_service_pb2',\n __doc__=\n \"\"\"Request message for\n [UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].\n \n \n Attributes:\n 
resource_name:\n Resource name of the UserInterest to fetch.\n \"\"\"\n ))\n_sym_db.RegisterMessage(GetUserInterestRequest)\nDESCRIPTOR._options = None\n_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(name=\n 'UserInterestService', full_name=\n 'google.ads.googleads.v1.services.UserInterestService', file=DESCRIPTOR,\n index=0, serialized_options=None, serialized_start=245, serialized_end=\n 450, methods=[_descriptor.MethodDescriptor(name='GetUserInterest',\n full_name=\n 'google.ads.googleads.v1.services.UserInterestService.GetUserInterest',\n index=0, containing_service=None, input_type=_GETUSERINTERESTREQUEST,\n output_type=\n google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\n ._USERINTEREST, serialized_options=_b(\n '\\x82Óä\\x93\\x021\\x12//v1/{resource_name=customers/*/userInterests/*}'))])\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\nDESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: google/ads/googleads_v1/proto/services/user_interest_service.proto\n\nimport sys\n_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\n\nfrom google.ads.google_ads.v1.proto.resources import user_interest_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2\nfrom google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2\n\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name='google/ads/googleads_v1/proto/services/user_interest_service.proto',\n package='google.ads.googleads.v1.services',\n syntax='proto3',\n serialized_options=_b('\\n$com.google.ads.googleads.v1.servicesB\\030UserInterestServiceProtoP\\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\\242\\002\\003GAA\\252\\002 Google.Ads.GoogleAds.V1.Services\\312\\002 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Services\\352\\002$Google::Ads::GoogleAds::V1::Services'),\n serialized_pb=_b('\\nBgoogle/ads/googleads_v1/proto/services/user_interest_service.proto\\x12 google.ads.googleads.v1.services\\x1a;google/ads/googleads_v1/proto/resources/user_interest.proto\\x1a\\x1cgoogle/api/annotations.proto\\\"/\\n\\x16GetUserInterestRequest\\x12\\x15\\n\\rresource_name\\x18\\x01 \\x01(\\t2\\xcd\\x01\\n\\x13UserInterestService\\x12\\xb5\\x01\\n\\x0fGetUserInterest\\x12\\x38.google.ads.googleads.v1.services.GetUserInterestRequest\\x1a/.google.ads.googleads.v1.resources.UserInterest\\\"7\\x82\\xd3\\xe4\\x93\\x02\\x31\\x12//v1/{resource_name=customers/*/userInterests/*}B\\xff\\x01\\n$com.google.ads.googleads.v1.servicesB\\x18UserInterestServiceProtoP\\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v1/services;services\\xa2\\x02\\x03GAA\\xaa\\x02 Google.Ads.GoogleAds.V1.Services\\xca\\x02 Google\\\\Ads\\\\GoogleAds\\\\V1\\\\Services\\xea\\x02$Google::Ads::GoogleAds::V1::Servicesb\\x06proto3')\n ,\n dependencies=[google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])\n\n\n\n\n_GETUSERINTERESTREQUEST = _descriptor.Descriptor(\n name='GetUserInterestRequest',\n full_name='google.ads.googleads.v1.services.GetUserInterestRequest',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name='resource_name', full_name='google.ads.googleads.v1.services.GetUserInterestRequest.resource_name', index=0,\n number=1, type=9, cpp_type=9, label=1,\n has_default_value=False, default_value=_b(\"\").decode('utf-8'),\n message_type=None, enum_type=None, containing_type=None,\n is_extension=False, extension_scope=None,\n serialized_options=None, file=DESCRIPTOR),\n ],\n extensions=[\n ],\n nested_types=[],\n enum_types=[\n ],\n serialized_options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[\n ],\n serialized_start=195,\n serialized_end=242,\n)\n\nDESCRIPTOR.message_types_by_name['GetUserInterestRequest'] = _GETUSERINTERESTREQUEST\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nGetUserInterestRequest = _reflection.GeneratedProtocolMessageType('GetUserInterestRequest', (_message.Message,), dict(\n 
DESCRIPTOR = _GETUSERINTERESTREQUEST,\n __module__ = 'google.ads.googleads_v1.proto.services.user_interest_service_pb2'\n ,\n __doc__ = \"\"\"Request message for\n [UserInterestService.GetUserInterest][google.ads.googleads.v1.services.UserInterestService.GetUserInterest].\n \n \n Attributes:\n resource_name:\n Resource name of the UserInterest to fetch.\n \"\"\",\n # @@protoc_insertion_point(class_scope:google.ads.googleads.v1.services.GetUserInterestRequest)\n ))\n_sym_db.RegisterMessage(GetUserInterestRequest)\n\n\nDESCRIPTOR._options = None\n\n_USERINTERESTSERVICE = _descriptor.ServiceDescriptor(\n name='UserInterestService',\n full_name='google.ads.googleads.v1.services.UserInterestService',\n file=DESCRIPTOR,\n index=0,\n serialized_options=None,\n serialized_start=245,\n serialized_end=450,\n methods=[\n _descriptor.MethodDescriptor(\n name='GetUserInterest',\n full_name='google.ads.googleads.v1.services.UserInterestService.GetUserInterest',\n index=0,\n containing_service=None,\n input_type=_GETUSERINTERESTREQUEST,\n output_type=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_user__interest__pb2._USERINTEREST,\n serialized_options=_b('\\202\\323\\344\\223\\0021\\022//v1/{resource_name=customers/*/userInterests/*}'),\n ),\n])\n_sym_db.RegisterServiceDescriptor(_USERINTERESTSERVICE)\n\nDESCRIPTOR.services_by_name['UserInterestService'] = _USERINTERESTSERVICE\n\n# @@protoc_insertion_point(module_scope)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
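The generated module above only declares the GetUserInterestRequest message and the UserInterestService descriptor; the transport/stub code is generated separately. A hedged sketch of how the message class itself is typically used with the standard protobuf runtime — the import path below is an assumption read off the generated __module__ string and may differ in an installed package:

# Assumed import path (taken from the __module__ string in the generated file).
from google.ads.googleads_v1.proto.services import user_interest_service_pb2 as svc_pb2

req = svc_pb2.GetUserInterestRequest(resource_name='customers/123/userInterests/456')
payload = req.SerializeToString()                       # wire-format bytes
round_trip = svc_pb2.GetUserInterestRequest.FromString(payload)
assert round_trip.resource_name == req.resource_name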
class TreeNode:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def find(root, val):
if not root:
return None
if val < root.val:
return find(root.left, val)
elif val > root.val:
return find(root.right, val)
else:
return root
def find_min(root):
if root:
while root.left:
root = root.left
return root
<|reserved_special_token_0|>
def insert(root, val):
if not root:
root = TreeNode(val)
elif val < root.val:
root.left = insert(root.left, val)
elif val > root.val:
root.right = insert(root.right, val)
else:
pass
return root
<|reserved_special_token_0|>
def height(root):
if root is None:
return -1
else:
return 1 + max(height(root.left), height(root.right))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def find(root, val):
if not root:
return None
if val < root.val:
return find(root.left, val)
elif val > root.val:
return find(root.right, val)
else:
return root
def find_min(root):
if root:
while root.left:
root = root.left
return root
def find_max(root):
if root:
while root.right:
root = root.right
return root
def insert(root, val):
if not root:
root = TreeNode(val)
elif val < root.val:
root.left = insert(root.left, val)
elif val > root.val:
root.right = insert(root.right, val)
else:
pass
return root
<|reserved_special_token_0|>
def height(root):
if root is None:
return -1
else:
return 1 + max(height(root.left), height(root.right))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def find(root, val):
if not root:
return None
if val < root.val:
return find(root.left, val)
elif val > root.val:
return find(root.right, val)
else:
return root
def find_min(root):
if root:
while root.left:
root = root.left
return root
def find_max(root):
if root:
while root.right:
root = root.right
return root
def insert(root, val):
if not root:
root = TreeNode(val)
elif val < root.val:
root.left = insert(root.left, val)
elif val > root.val:
root.right = insert(root.right, val)
else:
pass
return root
def delete(root, val):
if not root:
return None
elif val < root.val:
root.left = delete(root.left, val)
elif val > root.val:
root.right = delete(root.right, val)
elif root.left and root.right:
tmp = find_min(root.right)
root.val = tmp.val
root.right = delete(root.right, tmp.val)
else:
root = root.left if root.left else root.right
return root
def height(root):
if root is None:
return -1
else:
return 1 + max(height(root.left), height(root.right))
if __name__ == '__main__':
vals = [1, 2, 3, 4, 5, 6, 7, 8]
root = None
from DataStructure.tree import in_order
for v in vals:
root = insert(root, v)
tree_in_order = in_order(root)
    assert vals == tree_in_order, 'error while building the tree'
print(height(root))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Author:sen
# Date:2020/4/2 14:15
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def find(root, val):
if not root:
return None
if val < root.val:
return find(root.left, val)
elif val > root.val:
return find(root.right, val)
else:
return root
def find_min(root):
if root:
while root.left:
root = root.left
return root
def find_max(root):
if root:
while root.right:
root = root.right
return root
def insert(root, val):
    if not root:
        root = TreeNode(val)
    elif val < root.val:
        root.left = insert(root.left, val)
    elif val > root.val:
        root.right = insert(root.right, val)
    else:
        pass  # val == root.val: the value is already in the tree, do nothing
    return root
def delete(root, val):
    if not root:
        return None
    elif val < root.val:
        root.left = delete(root.left, val)  # returns the new root of the left subtree
    elif val > root.val:
        root.right = delete(root.right, val)
    else:  # perform the actual deletion
        if root.left and root.right:  # the node has two children
            tmp = find_min(root.right)
            root.val = tmp.val
            root.right = delete(root.right, tmp.val)
        else:  # zero or one child
            root = root.left if root.left else root.right
    return root
def height(root):
if root is None:
return -1
else:
return 1 + max(height(root.left), height(root.right))
if __name__ == '__main__':
    vals = [1, 2, 3, 4, 5, 6, 7, 8]
    root = None
    from DataStructure.tree import in_order
    for v in vals:
        root = insert(root, v)
    tree_in_order = in_order(root)
    assert vals == tree_in_order, "error while building the tree"
    # vals.append(9)
    # root = insert(root, 9)
    # tree_in_order = in_order(root)
    # assert vals == tree_in_order, "insertion error"
    #
    # vals.remove(6)
    # root = delete(root, 6)
    # tree_in_order = in_order(root)
    # assert vals == tree_in_order, "deletion error"
    print(height(root))
|
flexible
|
{
"blob_id": "9e525eccbf10a710d6f37c903370cc10f7d2c62b",
"index": 8475,
"step-1": "class TreeNode:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n\n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n\n\n<mask token>\n\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass\n return root\n\n\n<mask token>\n\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\n\n<mask token>\n",
"step-3": "class TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n\n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n\n\ndef find_max(root):\n if root:\n while root.right:\n root = root.right\n return root\n\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass\n return root\n\n\n<mask token>\n\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\n\n<mask token>\n",
"step-4": "class TreeNode:\n\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n\n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n\n\ndef find_max(root):\n if root:\n while root.right:\n root = root.right\n return root\n\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass\n return root\n\n\ndef delete(root, val):\n if not root:\n return None\n elif val < root.val:\n root.left = delete(root.left, val)\n elif val > root.val:\n root.right = delete(root.right, val)\n elif root.left and root.right:\n tmp = find_min(root.right)\n root.val = tmp.val\n root.right = delete(root.right, tmp.val)\n else:\n root = root.left if root.left else root.right\n return root\n\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\n\nif __name__ == '__main__':\n vals = [1, 2, 3, 4, 5, 6, 7, 8]\n root = None\n from DataStructure.tree import in_order\n for v in vals:\n root = insert(root, v)\n tree_in_order = in_order(root)\n assert vals == tree_in_order, '构建树出错'\n print(height(root))\n",
"step-5": "# -*- coding: utf-8 -*-\n# Author:sen\n# Date:2020/4/2 14:15\n\nclass TreeNode:\n def __init__(self, val):\n self.val = val \n self.left = None\n self.right = None\n\ndef find(root, val):\n if not root:\n return None\n if val < root.val:\n return find(root.left, val)\n elif val > root.val:\n return find(root.right, val)\n else:\n return root\n \n\ndef find_min(root):\n if root:\n while root.left:\n root = root.left\n return root\n \n\ndef find_max(root):\n if root:\n while root.right:\n root = root.right\n return root\n\ndef insert(root, val):\n if not root:\n root = TreeNode(val)\n elif val < root.val:\n root.left = insert(root.left, val)\n elif val > root.val:\n root.right = insert(root.right, val)\n else:\n pass # val==root.val val已经在树中,什么都不做\n return root\n\n\ndef delete(root, val):\n if not root:\n return None\n elif val < root.val:\n root.left = delete(root.left, val) # 返回左子树的根\n elif val > root.val:\n root.right = delete(root.right, val)\n else: # 执行删除操作\n if root.left and root.right: # 两个孩子节点的情况\n tmp = find_min(root.right)\n root.val = tmp.val\n root.right = delete(root.right, tmp.val)\n else: # 0个或1个\n root = root.left if root.left else root.right\n return root\n\ndef height(root):\n if root is None:\n return -1\n else:\n return 1 + max(height(root.left), height(root.right))\n\nif __name__ == '__main__':\n vals = [1, 2, 3, 4, 5, 6, 7, 8]\n root = None\n from DataStructure.tree import in_order\n for v in vals:\n root = insert(root, v)\n tree_in_order = in_order(root)\n assert vals == tree_in_order, \"构建树出错\"\n # vals.append(9)\n # root = insert(root, 9)\n # tree_in_order = in_order(root)\n # assert vals == tree_in_order, \"插入出错\"\n # \n # vals.remove(6)\n # root = delete(root, 6)\n # tree_in_order = in_order(root)\n # assert vals == tree_in_order, \"删除出错\"\n \n print(height(root))\n ",
"step-ids": [
1,
6,
7,
9,
10
]
}
|
[
1,
6,
7,
9,
10
] |
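In the record above, the __main__ block inserts [1..8] in ascending order, so the search tree degenerates into a right-leaning chain and height(root) prints 7. A short usage sketch with an unsorted insert order, assuming the TreeNode/insert/find/delete/height definitions from the record are in scope:

root = None
for v in [5, 3, 8, 1, 4, 7, 9]:
    root = insert(root, v)

print(height(root))               # 2: the tree stays balanced for this input
print(find(root, 4) is not None)  # True
print(find(root, 6))              # None: 6 was never inserted

root = delete(root, 3)            # 3 has two children; it is replaced by the min of its right subtree (4)
print(find(root, 3))              # None after deletion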
<|reserved_special_token_0|>
def preprocess(passengers, columns_to_delete):
for column_to_delete in sorted(columns_to_delete, reverse=True):
[passenger.pop(column_to_delete) for passenger in passengers]
for i in range(len(passengers)):
passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0
print(np.array(passengers, dtype=np.float32))
return np.array(passengers, dtype=np.float32)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
titanic.download_dataset('titanic_dataset.csv')
<|reserved_special_token_0|>
def preprocess(passengers, columns_to_delete):
for column_to_delete in sorted(columns_to_delete, reverse=True):
[passenger.pop(column_to_delete) for passenger in passengers]
for i in range(len(passengers)):
passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0
print(np.array(passengers, dtype=np.float32))
return np.array(passengers, dtype=np.float32)
<|reserved_special_token_0|>
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)
<|reserved_special_token_0|>
print('DiCaprio Surviving Rate:', pred[0][1])
print('Winslet Surviving Rate:', pred[1][1])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
titanic.download_dataset('titanic_dataset.csv')
<|reserved_special_token_0|>
data, labels = load_csv('titanic_dataset.csv', target_column=0,
categorical_labels=True, n_classes=2)
<|reserved_special_token_0|>
def preprocess(passengers, columns_to_delete):
for column_to_delete in sorted(columns_to_delete, reverse=True):
[passenger.pop(column_to_delete) for passenger in passengers]
for i in range(len(passengers)):
passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0
print(np.array(passengers, dtype=np.float32))
return np.array(passengers, dtype=np.float32)
to_ignore = [1, 6]
data = preprocess(data, to_ignore)
<|reserved_special_token_0|>
net = tflearn.input_data(shape=[None, 6])
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net)
<|reserved_special_token_0|>
model = tflearn.DNN(net)
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)
<|reserved_special_token_0|>
dicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0]
winslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0]
dicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)
pred = model.predict([dicaprio, winslet])
print('DiCaprio Surviving Rate:', pred[0][1])
print('Winslet Surviving Rate:', pred[1][1])
<|reserved_special_token_1|>
from __future__ import division, print_function, absolute_import
<|reserved_special_token_0|>
import numpy as np
import tflearn
from tflearn.datasets import titanic
titanic.download_dataset('titanic_dataset.csv')
from tflearn.data_utils import load_csv
data, labels = load_csv('titanic_dataset.csv', target_column=0,
categorical_labels=True, n_classes=2)
<|reserved_special_token_0|>
def preprocess(passengers, columns_to_delete):
for column_to_delete in sorted(columns_to_delete, reverse=True):
[passenger.pop(column_to_delete) for passenger in passengers]
for i in range(len(passengers)):
passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0
print(np.array(passengers, dtype=np.float32))
return np.array(passengers, dtype=np.float32)
to_ignore = [1, 6]
data = preprocess(data, to_ignore)
<|reserved_special_token_0|>
net = tflearn.input_data(shape=[None, 6])
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 32)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net)
<|reserved_special_token_0|>
model = tflearn.DNN(net)
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)
<|reserved_special_token_0|>
dicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0]
winslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0]
dicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)
pred = model.predict([dicaprio, winslet])
print('DiCaprio Surviving Rate:', pred[0][1])
print('Winslet Surviving Rate:', pred[1][1])
<|reserved_special_token_1|>
from __future__ import division, print_function, absolute_import
"""
The dataset is stored in a CSV file, so we can use the TFLearn load_csv() function to
load the data from the CSV file into a python list.
We specify the 'target_column' argument to indicate that our labels (survived or not)
are located in the first column (id: 0). The function will return a tuple: (data, labels).
"""
import numpy as np
import tflearn
# Download the Titanic dataset
from tflearn.datasets import titanic
titanic.download_dataset('titanic_dataset.csv')
# Load the CSV file, indicating that the first column holds the labels
from tflearn.data_utils import load_csv
data, labels = load_csv('titanic_dataset.csv',target_column=0,
categorical_labels=True,n_classes=2)
'''
Preprocessing Data
Data are given 'as is' and need some preprocessing to be ready for use in our deep neural network classifier.
First, we will discard the fields that are not likely to help in our analysis.
For example, we make the assumption that the 'name' field will not be very useful in our task,
since a passenger's name and his or her chance of surviving are probably not correlated.
With such thinking, we can go ahead and discard the 'name' and 'ticket' fields.
Then, we need to convert all our data to numerical values,
because a neural network model can only perform operations over numbers.
However, our dataset contains some non-numerical values, such as 'name' and 'sex'. Because 'name' is discarded,
we just need to handle the 'sex' field. In this simple case, we will just assign '0' to males and '1' to females.
example:
survived pclass name sex age sibsp parch ticket fare
1 1 Aubart, Mme. Leontine Pauline female 24 0 0 PC 17477 69.3000
'''
# Here is the preprocessing function:
#Preprocessing function
def preprocess(passengers,columns_to_delete):
	# Sort the column ids in descending order and delete each column
for column_to_delete in sorted(columns_to_delete,reverse = True):
[passenger.pop(column_to_delete) for passenger in passengers]
# print(type(passengers[0]))
for i in range(len(passengers)):
# Converting 'sex' field to float (id is 1 after removing labels column)
passengers[i][1] = 1. if passengers[i][1] == 'female' else 0.
print(np.array(passengers,dtype=np.float32))
return np.array(passengers,dtype=np.float32)
# Ignore 'name' and 'ticket' columns (id 1 & 6 of data array)
to_ignore = [1,6]
#Preprocess data
data = preprocess(data,to_ignore)
'''
Build a Deep Neural Network
We are building a 3-layer neural network using TFLearn. First, we need to specify the shape of our input data.
In our case, each sample has a total of 6 features, and we will process samples per batch to save memory.
So our data input shape is [None, 6] ('None' stands for an unknown dimension, so we can change the total
number of samples that are processed in a batch).
'''
# Build neural network
net = tflearn.input_data(shape=[None,6])
net = tflearn.fully_connected(net,32)
net = tflearn.fully_connected(net,32)
net = tflearn.fully_connected(net,2,activation='softmax')
net =tflearn.regression(net)
'''
Training
TFLearn provides a model wrapper ('DNN') that automatically performs neural network classifier tasks,
such as training, prediction, save/restore, and more. We will run it for 10 epochs
(i.e., the network will see all data 10 times) with a batch size of 16.
'''
#Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)
'''
Try the Model
It's time to try out our model.
For fun, let's take Titanic movie protagonists
(DiCaprio and Winslet) and calculate their chance of surviving (class 1).
'''
# Let's create some data for DiCaprio and Winslet
dicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0000]
winslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0000]
# Preprocess data
dicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)
# Predict surviving chances (class 1 results)
pred = model.predict([dicaprio, winslet])
print("DiCaprio Surviving Rate:", pred[0][1])
print("Winslet Surviving Rate:", pred[1][1])
|
flexible
|
{
"blob_id": "87e9c1d264523d02b287dedb44472fc08b488908",
"index": 9630,
"step-1": "<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\n<mask token>\n",
"step-2": "<mask token>\ntitanic.download_dataset('titanic_dataset.csv')\n<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\n<mask token>\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n<mask token>\nprint('DiCaprio Surviving Rate:', pred[0][1])\nprint('Winslet Surviving Rate:', pred[1][1])\n",
"step-3": "<mask token>\ntitanic.download_dataset('titanic_dataset.csv')\n<mask token>\ndata, labels = load_csv('titanic_dataset.csv', target_column=0,\n categorical_labels=True, n_classes=2)\n<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\nto_ignore = [1, 6]\ndata = preprocess(data, to_ignore)\n<mask token>\nnet = tflearn.input_data(shape=[None, 6])\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 2, activation='softmax')\nnet = tflearn.regression(net)\n<mask token>\nmodel = tflearn.DNN(net)\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n<mask token>\ndicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0]\nwinslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0]\ndicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)\npred = model.predict([dicaprio, winslet])\nprint('DiCaprio Surviving Rate:', pred[0][1])\nprint('Winslet Surviving Rate:', pred[1][1])\n",
"step-4": "from __future__ import division, print_function, absolute_import\n<mask token>\nimport numpy as np\nimport tflearn\nfrom tflearn.datasets import titanic\ntitanic.download_dataset('titanic_dataset.csv')\nfrom tflearn.data_utils import load_csv\ndata, labels = load_csv('titanic_dataset.csv', target_column=0,\n categorical_labels=True, n_classes=2)\n<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\nto_ignore = [1, 6]\ndata = preprocess(data, to_ignore)\n<mask token>\nnet = tflearn.input_data(shape=[None, 6])\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 2, activation='softmax')\nnet = tflearn.regression(net)\n<mask token>\nmodel = tflearn.DNN(net)\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n<mask token>\ndicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0]\nwinslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0]\ndicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)\npred = model.predict([dicaprio, winslet])\nprint('DiCaprio Surviving Rate:', pred[0][1])\nprint('Winslet Surviving Rate:', pred[1][1])\n",
"step-5": "from __future__ import division, print_function, absolute_import\n\"\"\"\nThe dataset is stored in a CSV file, so we can use the TFLearn load_csv() function to\n load the data from the CSV file into a python list.\n We specify the 'target_column' argument to indicate that our labels (survived or not)\n are located in the first column (id: 0). The function will return a tuple: (data, labels).\n\"\"\"\nimport numpy as np\nimport tflearn\n\n#DownLoad the Titanic dataset\nfrom tflearn.datasets import titanic\ntitanic.download_dataset('titanic_dataset.csv')\n\n#loadCSVfile,indicate that the first column represent labels\nfrom tflearn.data_utils import load_csv\ndata, labels = load_csv('titanic_dataset.csv',target_column=0,\n\t\t\t\t\t\tcategorical_labels=True,n_classes=2)\n\n'''\nPreprocessing Data\n\nData are given 'as is' and need some preprocessing to be ready for use in our deep neural network classifier.\nFirst, we will discard the fields that are not likely to help in our analysis.\nFor example, we make the assumption that the 'name' field will not be very useful in our task,\nsince a passenger's name and his or her chance of surviving are probably not correlated.\nWith such thinking, we can go ahead and discard the 'name' and 'ticket' fields.\nThen, we need to convert all our data to numerical values,\nbecause a neural network model can only perform operations over numbers.\nHowever, our dataset contains some non-numerical values, such as 'name' and 'sex'. Because 'name' is discarded,\nwe just need to handle the 'sex' field. In this simple case, we will just assign '0' to males and '1' to females.\n\nexample:\nsurvived\tpclass\tname\t\t\t\t\t\t\tsex\t\tage\t\tsibsp\tparch\tticket\t\tfare\n1\t\t\t1\t\tAubart, Mme. Leontine Pauline\tfemale\t24\t\t0\t\t0\t\tPC 17477\t69.3000\n'''\n# Here is the preprocessing function:\n#Preprocessing function\ndef preprocess(passengers,columns_to_delete):\n\t#Sort by descending is and delete column\n\tfor column_to_delete in sorted(columns_to_delete,reverse = True):\n\t\t[passenger.pop(column_to_delete) for passenger in passengers]\n\t# print(type(passengers[0]))\n\tfor i in range(len(passengers)):\n\t\t# Converting 'sex' field to float (id is 1 after removing labels column)\n\t\tpassengers[i][1] = 1. if passengers[i][1] == 'female' else 0.\n\tprint(np.array(passengers,dtype=np.float32))\n\treturn np.array(passengers,dtype=np.float32)\n\n# Ignore 'name' and 'ticket' columns (id 1 & 6 of data array)\nto_ignore = [1,6]\n#Preprocess data\ndata = preprocess(data,to_ignore)\n\n'''\nBuild a Deep Neural Network\n\nWe are building a 3-layer neural network using TFLearn. First, we need to specify the shape of our input data.\nIn our case, each sample has a total of 6 features, and we will process samples per batch to save memory.\nSo our data input shape is [None, 6] ('None' stands for an unknown dimension, so we can change the total\nnumber of samples that are processed in a batch).\n'''\n# Build neural network\nnet = tflearn.input_data(shape=[None,6])\nnet = tflearn.fully_connected(net,32)\nnet = tflearn.fully_connected(net,32)\nnet = tflearn.fully_connected(net,2,activation='softmax')\nnet =tflearn.regression(net)\n\n'''\nTraining\n\nTFLearn provides a model wrapper ('DNN') that automatically performs neural network classifier tasks,\nsuch as training, prediction, save/restore, and more. 
We will run it for 10 epochs\n(i.e., the network will see all data 10 times) with a batch size of 16.\n'''\n\n#Define model\nmodel = tflearn.DNN(net)\n# Start training (apply gradient descent algorithm)\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n\n'''\nTry the Model\nIt's time to try out our model.\nFor fun, let's take Titanic movie protagonists\n(DiCaprio and Winslet) and calculate their chance of surviving (class 1).\n'''\n\n# Let's create some data for DiCaprio and Winslet\ndicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0000]\nwinslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0000]\n# Preprocess data\ndicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)\n# Predict surviving chances (class 1 results)\npred = model.predict([dicaprio, winslet])\nprint(\"DiCaprio Surviving Rate:\", pred[0][1])\nprint(\"Winslet Surviving Rate:\", pred[1][1])\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
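The column bookkeeping in the record's preprocess() is the easiest part to get wrong, so here is what it does to a single passenger row, written in plain Python (no tflearn needed). Column ids follow the record: once the label column has been stripped by load_csv, id 1 is 'name' and id 6 is 'ticket', and 'sex' ends up at id 1 after 'name' is removed:

row = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0]
to_ignore = [1, 6]                              # 'name' and 'ticket'

for col in sorted(to_ignore, reverse=True):     # delete from the right so earlier ids stay valid
    row.pop(col)
row[1] = 1.0 if row[1] == 'female' else 0.0     # 'sex' sits at id 1 once 'name' is gone

print(row)  # [3, 0.0, 19, 0, 0, 5.0] -> pclass, sex, age, sibsp, parch, fare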
<|reserved_special_token_0|>
class Airplane(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, icao_code, airline, manufacturer=None, model=None):
self.icao_code = icao_code
self.airline = airline
self.manufacturer = manufacturer
self.model = model
<|reserved_special_token_0|>
@staticmethod
def exists_airplane(session, icao_code):
q = session.query(Airplane).filter(Airplane.icao_code == icao_code)
return session.query(literal(True)).filter(q.exists()).scalar()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Airplane(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, icao_code, airline, manufacturer=None, model=None):
self.icao_code = icao_code
self.airline = airline
self.manufacturer = manufacturer
self.model = model
<|reserved_special_token_0|>
@staticmethod
def exists_airplane(session, icao_code):
q = session.query(Airplane).filter(Airplane.icao_code == icao_code)
return session.query(literal(True)).filter(q.exists()).scalar()
@staticmethod
def airplane_from_icao_code(session, icao_code):
return session.query(Airplane).filter(Airplane.icao_code == icao_code
).first()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Airplane(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, icao_code, airline, manufacturer=None, model=None):
self.icao_code = icao_code
self.airline = airline
self.manufacturer = manufacturer
self.model = model
def __repr__(self):
return 'Airplane({icao_code}, {airline})'.format(icao_code=self.
icao_code, airline=self.airline)
@staticmethod
def exists_airplane(session, icao_code):
q = session.query(Airplane).filter(Airplane.icao_code == icao_code)
return session.query(literal(True)).filter(q.exists()).scalar()
@staticmethod
def airplane_from_icao_code(session, icao_code):
return session.query(Airplane).filter(Airplane.icao_code == icao_code
).first()
<|reserved_special_token_1|>
from sqlalchemy import literal, Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from common.db import Base
class Airplane(Base):
__tablename__ = 'airplanes'
id = Column(Integer, primary_key=True)
icao_code = Column(String(6), unique=True, nullable=False)
airline_id = Column(Integer, ForeignKey('airlines.id'))
airline = relationship('Airline', backref='airplanes')
manufacturer = Column(String)
model = Column(String)
def __init__(self, icao_code, airline, manufacturer=None, model=None):
self.icao_code = icao_code
self.airline = airline
self.manufacturer = manufacturer
self.model = model
def __repr__(self):
return 'Airplane({icao_code}, {airline})'.format(icao_code=self.
icao_code, airline=self.airline)
@staticmethod
def exists_airplane(session, icao_code):
q = session.query(Airplane).filter(Airplane.icao_code == icao_code)
return session.query(literal(True)).filter(q.exists()).scalar()
@staticmethod
def airplane_from_icao_code(session, icao_code):
return session.query(Airplane).filter(Airplane.icao_code == icao_code
).first()
<|reserved_special_token_1|>
from sqlalchemy import literal, Column, String, Integer, ForeignKey
from sqlalchemy.orm import relationship
from common.db import Base
class Airplane(Base):
__tablename__ = 'airplanes'
id = Column(Integer, primary_key=True)
icao_code = Column(String(6), unique=True, nullable=False) # ICAO 24-bit identifier
airline_id = Column(Integer, ForeignKey('airlines.id'))
airline = relationship('Airline', backref='airplanes')
manufacturer = Column(String)
model = Column(String)
def __init__(self, icao_code, airline, manufacturer=None, model=None):
self.icao_code = icao_code
self.airline = airline
self.manufacturer = manufacturer
self.model = model
def __repr__(self):
return 'Airplane({icao_code}, {airline})'.format(
icao_code=self.icao_code,
airline=self.airline)
@staticmethod
def exists_airplane(session, icao_code):
q = session.query(Airplane).filter(Airplane.icao_code == icao_code)
return session.query(literal(True)).filter(q.exists()).scalar()
@staticmethod
def airplane_from_icao_code(session, icao_code):
return session.query(Airplane).filter(Airplane.icao_code == icao_code).first()
|
flexible
|
{
"blob_id": "98dac1ea372f16ecdb818fbe3287ab7e51a0d67c",
"index": 7916,
"step-1": "<mask token>\n\n\nclass Airplane(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n <mask token>\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Airplane(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n <mask token>\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code\n ).first()\n",
"step-3": "<mask token>\n\n\nclass Airplane(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n\n def __repr__(self):\n return 'Airplane({icao_code}, {airline})'.format(icao_code=self.\n icao_code, airline=self.airline)\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code\n ).first()\n",
"step-4": "from sqlalchemy import literal, Column, String, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom common.db import Base\n\n\nclass Airplane(Base):\n __tablename__ = 'airplanes'\n id = Column(Integer, primary_key=True)\n icao_code = Column(String(6), unique=True, nullable=False)\n airline_id = Column(Integer, ForeignKey('airlines.id'))\n airline = relationship('Airline', backref='airplanes')\n manufacturer = Column(String)\n model = Column(String)\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n\n def __repr__(self):\n return 'Airplane({icao_code}, {airline})'.format(icao_code=self.\n icao_code, airline=self.airline)\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code\n ).first()\n",
"step-5": "from sqlalchemy import literal, Column, String, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom common.db import Base\n\n\nclass Airplane(Base):\n __tablename__ = 'airplanes'\n\n id = Column(Integer, primary_key=True)\n icao_code = Column(String(6), unique=True, nullable=False) # ICAO 24-bit identifier\n airline_id = Column(Integer, ForeignKey('airlines.id'))\n airline = relationship('Airline', backref='airplanes')\n manufacturer = Column(String)\n model = Column(String)\n\n def __init__(self, icao_code, airline, manufacturer=None, model=None):\n self.icao_code = icao_code\n self.airline = airline\n self.manufacturer = manufacturer\n self.model = model\n\n def __repr__(self):\n return 'Airplane({icao_code}, {airline})'.format(\n icao_code=self.icao_code,\n airline=self.airline)\n\n @staticmethod\n def exists_airplane(session, icao_code):\n q = session.query(Airplane).filter(Airplane.icao_code == icao_code)\n return session.query(literal(True)).filter(q.exists()).scalar()\n\n @staticmethod\n def airplane_from_icao_code(session, icao_code):\n return session.query(Airplane).filter(Airplane.icao_code == icao_code).first()\n \n ",
"step-ids": [
3,
4,
5,
7,
8
]
}
|
[
3,
4,
5,
7,
8
] |
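A hedged usage sketch for the model above. It assumes common.db.Base is a standard declarative base and that the Airline model referenced by the relationship exists with a name column; neither is shown in the record, so both are assumptions, as is the Airline(name=...) constructor:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)                 # creates the airlines and airplanes tables
session = sessionmaker(bind=engine)()

airline = Airline(name='Lufthansa')              # assumed constructor signature
session.add(Airplane('3c6444', airline, manufacturer='Airbus', model='A320'))
session.commit()

print(Airplane.exists_airplane(session, '3c6444'))          # True
print(Airplane.airplane_from_icao_code(session, '3c6444'))  # Airplane(3c6444, ...)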
<|reserved_special_token_0|>
class ParGrid:
<|reserved_special_token_0|>
def __init__(self):
self.dims = []
def add_dimension(self, name, i_arg, value_strs, companions=None):
self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))
def get_n_dimensions(self):
return len(self.dims)
<|reserved_special_token_0|>
def get_shape(self):
shape = ()
for i in range(self.get_n_dimensions()):
shape += self.get_n_points_of_dim(i),
return shape
def get_n_grid_points(self):
return np.prod(self.get_shape())
def get_par_meshgrid(self, copy=False, sparse=False):
""" return a meshgrid of parameter values
Always uses Matrix indexing (natural for par grid) so that
mg[i1][i2][...] corresponds to index order in self.dims
Note copy is False by default as opposed to numpy default of True
"""
axes = []
for i in range(self.get_n_dimensions()):
            axes.append(self.dims[i].value_strs)
        return np.meshgrid(*axes, copy=copy, sparse=sparse, indexing='ij')
def get_zero_indices(self):
return np.zeros(self.get_n_dimensions(), dtype=np.uint32)
<|reserved_special_token_0|>
def get_data(self, i_dim, i_par):
name = self.dims[i_dim].name
i_arg = self.dims[i_dim].i_arg
value_str = self.dims[i_dim].value_strs[i_par]
companions = self.dims[i_dim].companions
return name, i_arg, value_str, companions
def print_data(self, indices):
print(f'Grid point at indices {indices}:')
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, _ = self.get_data(i_dim, i_par)
print(f'{name}[{i_arg}] = {value_str}')
def set_dsp_pars(self, dsp_config, indices):
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, companions = self.get_data(i_dim, i_par)
if dsp_config['processors'][name].get('init_args') is not None:
if np.isscalar(i_arg):
dsp_config['processors'][name]['init_args'][i_arg
] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['init_args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['init_args'][c_i_arg
] = c_value_str[i_par]
else:
if np.isscalar(i_arg):
dsp_config['processors'][name]['args'][i_arg] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['args'][c_i_arg
] = c_value_str[i_par]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ParGrid:
""" Parameter Grid class
Each ParGrid entry corresponds to a dsp parameter to be varied.
The ntuples must follow the pattern:
( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )
where name is the name of the dsp routine in dsp_config whose to be
optimized, i_arg is the index of the argument to be varied, value_strs is
the array of strings to set the argument to, and companions is an optional
list of ( name, i_arg, value_strs ) tuples for companion arguments that
need to change along with this one
Optionally, i_arg can be a list of the argument indices to be varied together,
where value_strs is a list of lists correponding to the strings to set the
arguments to in the same order.
"""
def __init__(self):
self.dims = []
def add_dimension(self, name, i_arg, value_strs, companions=None):
self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))
def get_n_dimensions(self):
return len(self.dims)
def get_n_points_of_dim(self, i):
return len(self.dims[i].value_strs)
def get_shape(self):
shape = ()
for i in range(self.get_n_dimensions()):
shape += self.get_n_points_of_dim(i),
return shape
def get_n_grid_points(self):
return np.prod(self.get_shape())
def get_par_meshgrid(self, copy=False, sparse=False):
""" return a meshgrid of parameter values
Always uses Matrix indexing (natural for par grid) so that
mg[i1][i2][...] corresponds to index order in self.dims
Note copy is False by default as opposed to numpy default of True
"""
axes = []
for i in range(self.get_n_dimensions()):
            axes.append(self.dims[i].value_strs)
        return np.meshgrid(*axes, copy=copy, sparse=sparse, indexing='ij')
def get_zero_indices(self):
return np.zeros(self.get_n_dimensions(), dtype=np.uint32)
def iterate_indices(self, indices):
""" iterate given indices [i1, i2, ...] by one.
        For easier iteration. The convention here is arbitrary, but it's the
        order the arrays would be traversed in a series of nested for loops in
        the order appearing in dims (first dimension is first for loop, etc):
Return False when the grid runs out of indices. Otherwise returns True.
"""
for iD in reversed(range(self.get_n_dimensions())):
indices[iD] += 1
if indices[iD] < self.get_n_points_of_dim(iD):
return True
indices[iD] = 0
return False
def get_data(self, i_dim, i_par):
name = self.dims[i_dim].name
i_arg = self.dims[i_dim].i_arg
value_str = self.dims[i_dim].value_strs[i_par]
companions = self.dims[i_dim].companions
return name, i_arg, value_str, companions
def print_data(self, indices):
print(f'Grid point at indices {indices}:')
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, _ = self.get_data(i_dim, i_par)
print(f'{name}[{i_arg}] = {value_str}')
def set_dsp_pars(self, dsp_config, indices):
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, companions = self.get_data(i_dim, i_par)
if dsp_config['processors'][name].get('init_args') is not None:
if np.isscalar(i_arg):
dsp_config['processors'][name]['init_args'][i_arg
] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['init_args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['init_args'][c_i_arg
] = c_value_str[i_par]
else:
if np.isscalar(i_arg):
dsp_config['processors'][name]['args'][i_arg] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['args'][c_i_arg
] = c_value_str[i_par]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None,
verbosity=0):
"""
Run one iteration of DSP on tb_data
Optionally returns a value for optimization
Parameters:
-----------
tb_data : lh5 Table
An input table of lh5 data. Typically a selection is made prior to
sending tb_data to this function: optimization typically doesn't have to
run over all data
dsp_config : dict
Specifies the DSP to be performed for this iteration (see
build_processing_chain()) and the list of output variables to appear in
the output table
db_dict : dict (optional)
DSP parameters database. See build_processing_chain for formatting info
fom_function : function or None (optional)
When given the output lh5 table of this DSP iteration, the
fom_function must return a scalar figure-of-merit value upon which the
optimization will be based. Should accept verbosity as a second argument
verbosity : int (optional)
verbosity for the processing chain and fom_function calls
Returns:
--------
figure_of_merit : float
If fom_function is not None, returns figure-of-merit value for the DSP iteration
tb_out : lh5 Table
If fom_function is None, returns the output lh5 table for the DSP iteration
"""
pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=
db_dict, verbosity=verbosity)
pc.execute()
if fom_function is not None:
return fom_function(tb_out, verbosity)
else:
return tb_out
<|reserved_special_token_0|>
class ParGrid:
""" Parameter Grid class
Each ParGrid entry corresponds to a dsp parameter to be varied.
The ntuples must follow the pattern:
( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )
    where name is the name of the dsp routine in dsp_config that is to be
optimized, i_arg is the index of the argument to be varied, value_strs is
the array of strings to set the argument to, and companions is an optional
list of ( name, i_arg, value_strs ) tuples for companion arguments that
need to change along with this one
Optionally, i_arg can be a list of the argument indices to be varied together,
    where value_strs is a list of lists corresponding to the strings to set the
arguments to in the same order.
"""
def __init__(self):
self.dims = []
def add_dimension(self, name, i_arg, value_strs, companions=None):
self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))
def get_n_dimensions(self):
return len(self.dims)
def get_n_points_of_dim(self, i):
return len(self.dims[i].value_strs)
def get_shape(self):
shape = ()
for i in range(self.get_n_dimensions()):
shape += self.get_n_points_of_dim(i),
return shape
def get_n_grid_points(self):
return np.prod(self.get_shape())
def get_par_meshgrid(self, copy=False, sparse=False):
""" return a meshgrid of parameter values
Always uses Matrix indexing (natural for par grid) so that
mg[i1][i2][...] corresponds to index order in self.dims
Note copy is False by default as opposed to numpy default of True
"""
axes = []
for i in range(self.get_n_dimensions()):
            axes.append(self.dims[i].value_strs)
        return np.meshgrid(*axes, copy=copy, sparse=sparse, indexing='ij')
def get_zero_indices(self):
return np.zeros(self.get_n_dimensions(), dtype=np.uint32)
def iterate_indices(self, indices):
""" iterate given indices [i1, i2, ...] by one.
        For easier iteration. The convention here is arbitrary, but it's the
        order the arrays would be traversed in a series of nested for loops in
        the order appearing in dims (first dimension is first for loop, etc):
Return False when the grid runs out of indices. Otherwise returns True.
"""
for iD in reversed(range(self.get_n_dimensions())):
indices[iD] += 1
if indices[iD] < self.get_n_points_of_dim(iD):
return True
indices[iD] = 0
return False
def get_data(self, i_dim, i_par):
name = self.dims[i_dim].name
i_arg = self.dims[i_dim].i_arg
value_str = self.dims[i_dim].value_strs[i_par]
companions = self.dims[i_dim].companions
return name, i_arg, value_str, companions
def print_data(self, indices):
print(f'Grid point at indices {indices}:')
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, _ = self.get_data(i_dim, i_par)
print(f'{name}[{i_arg}] = {value_str}')
def set_dsp_pars(self, dsp_config, indices):
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, companions = self.get_data(i_dim, i_par)
if dsp_config['processors'][name].get('init_args') is not None:
if np.isscalar(i_arg):
dsp_config['processors'][name]['init_args'][i_arg
] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['init_args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['init_args'][c_i_arg
] = c_value_str[i_par]
else:
if np.isscalar(i_arg):
dsp_config['processors'][name]['args'][i_arg] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['args'][c_i_arg
] = c_value_str[i_par]
def run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64,
db_dict=None, verbosity=0):
"""Extract a table of optimization values for a grid of DSP parameters
The grid argument defines a list of parameters and values over which to run
the DSP defined in dsp_config on tb_data. At each point, a scalar
figure-of-merit is extracted
    Returns an N-dimensional ndarray of figure-of-merit values, where the array
axes are in the order they appear in grid.
Parameters:
-----------
tb_data : lh5 Table
An input table of lh5 data. Typically a selection is made prior to
sending tb_data to this function: optimization typically doesn't have to
run over all data
dsp_config : dict
Specifies the DSP to be performed (see build_processing_chain()) and the
list of output variables to appear in the output table for each grid point
grid : ParGrid
See ParGrid class for format
fom_function : function
When given the output lh5 table of this DSP iteration, the fom_function
must return a scalar figure-of-merit. Should accept verbosity as a
second keyword argument
dtype : dtype (optional)
The data type of the fom_function's return object. Should be np.ndarray if
fom_function is set to None
db_dict : dict (optional)
DSP parameters database. See build_processing_chain for formatting info
verbosity : int (optional)
Verbosity for the processing chain and fom_function calls
Returns:
--------
grid_values : ndarray of floats
An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row
of the grid argument
"""
grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)
iii = grid.get_zero_indices()
if verbosity > 0:
print('Starting grid calculations...')
while True:
grid.set_dsp_pars(dsp_config, iii)
if verbosity > 1:
pprint(dsp_config)
if verbosity > 0:
grid.print_data(iii)
grid_values[tuple(iii)] = run_one_dsp(tb_data, dsp_config, db_dict=
db_dict, fom_function=fom_function, verbosity=verbosity)
if verbosity > 0:
print('Value:', grid_values[tuple(iii)])
if not grid.iterate_indices(iii):
break
return grid_values
<|reserved_special_token_1|>
import numpy as np
from .build_processing_chain import build_processing_chain
from collections import namedtuple
from pprint import pprint
def run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None,
verbosity=0):
"""
Run one iteration of DSP on tb_data
Optionally returns a value for optimization
Parameters:
-----------
tb_data : lh5 Table
An input table of lh5 data. Typically a selection is made prior to
sending tb_data to this function: optimization typically doesn't have to
run over all data
dsp_config : dict
Specifies the DSP to be performed for this iteration (see
build_processing_chain()) and the list of output variables to appear in
the output table
db_dict : dict (optional)
DSP parameters database. See build_processing_chain for formatting info
fom_function : function or None (optional)
When given the output lh5 table of this DSP iteration, the
fom_function must return a scalar figure-of-merit value upon which the
optimization will be based. Should accept verbosity as a second argument
verbosity : int (optional)
verbosity for the processing chain and fom_function calls
Returns:
--------
figure_of_merit : float
If fom_function is not None, returns figure-of-merit value for the DSP iteration
tb_out : lh5 Table
If fom_function is None, returns the output lh5 table for the DSP iteration
"""
pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=
db_dict, verbosity=verbosity)
pc.execute()
if fom_function is not None:
return fom_function(tb_out, verbosity)
else:
return tb_out
ParGridDimension = namedtuple('ParGridDimension',
'name i_arg value_strs companions')
class ParGrid:
""" Parameter Grid class
Each ParGrid entry corresponds to a dsp parameter to be varied.
The ntuples must follow the pattern:
( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )
    where name is the name of the dsp routine in dsp_config that is to be
optimized, i_arg is the index of the argument to be varied, value_strs is
the array of strings to set the argument to, and companions is an optional
list of ( name, i_arg, value_strs ) tuples for companion arguments that
need to change along with this one
Optionally, i_arg can be a list of the argument indices to be varied together,
    where value_strs is a list of lists corresponding to the strings to set the
arguments to in the same order.
"""
def __init__(self):
self.dims = []
def add_dimension(self, name, i_arg, value_strs, companions=None):
self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))
def get_n_dimensions(self):
return len(self.dims)
def get_n_points_of_dim(self, i):
return len(self.dims[i].value_strs)
def get_shape(self):
shape = ()
for i in range(self.get_n_dimensions()):
shape += self.get_n_points_of_dim(i),
return shape
def get_n_grid_points(self):
return np.prod(self.get_shape())
def get_par_meshgrid(self, copy=False, sparse=False):
""" return a meshgrid of parameter values
Always uses Matrix indexing (natural for par grid) so that
mg[i1][i2][...] corresponds to index order in self.dims
Note copy is False by default as opposed to numpy default of True
"""
axes = []
for i in range(self.get_n_dimensions()):
            axes.append(self.dims[i].value_strs)
        return np.meshgrid(*axes, copy=copy, sparse=sparse, indexing='ij')
def get_zero_indices(self):
return np.zeros(self.get_n_dimensions(), dtype=np.uint32)
def iterate_indices(self, indices):
""" iterate given indices [i1, i2, ...] by one.
        For easier iteration. The convention here is arbitrary, but it's the
        order the arrays would be traversed in a series of nested for loops in
        the order appearing in dims (first dimension is first for loop, etc):
Return False when the grid runs out of indices. Otherwise returns True.
"""
for iD in reversed(range(self.get_n_dimensions())):
indices[iD] += 1
if indices[iD] < self.get_n_points_of_dim(iD):
return True
indices[iD] = 0
return False
def get_data(self, i_dim, i_par):
name = self.dims[i_dim].name
i_arg = self.dims[i_dim].i_arg
value_str = self.dims[i_dim].value_strs[i_par]
companions = self.dims[i_dim].companions
return name, i_arg, value_str, companions
def print_data(self, indices):
print(f'Grid point at indices {indices}:')
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, _ = self.get_data(i_dim, i_par)
print(f'{name}[{i_arg}] = {value_str}')
def set_dsp_pars(self, dsp_config, indices):
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, companions = self.get_data(i_dim, i_par)
if dsp_config['processors'][name].get('init_args') is not None:
if np.isscalar(i_arg):
dsp_config['processors'][name]['init_args'][i_arg
] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['init_args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['init_args'][c_i_arg
] = c_value_str[i_par]
else:
if np.isscalar(i_arg):
dsp_config['processors'][name]['args'][i_arg] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['args'][i_arg[i]
] = value_str[i]
if companions is None:
continue
for c_name, c_i_arg, c_value_str in companions:
dsp_config['processors'][c_name]['args'][c_i_arg
] = c_value_str[i_par]
def run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64,
db_dict=None, verbosity=0):
"""Extract a table of optimization values for a grid of DSP parameters
The grid argument defines a list of parameters and values over which to run
the DSP defined in dsp_config on tb_data. At each point, a scalar
figure-of-merit is extracted
    Returns an N-dimensional ndarray of figure-of-merit values, where the array
axes are in the order they appear in grid.
Parameters:
-----------
tb_data : lh5 Table
An input table of lh5 data. Typically a selection is made prior to
sending tb_data to this function: optimization typically doesn't have to
run over all data
dsp_config : dict
Specifies the DSP to be performed (see build_processing_chain()) and the
list of output variables to appear in the output table for each grid point
grid : ParGrid
See ParGrid class for format
fom_function : function
When given the output lh5 table of this DSP iteration, the fom_function
must return a scalar figure-of-merit. Should accept verbosity as a
second keyword argument
dtype : dtype (optional)
The data type of the fom_function's return object. Should be np.ndarray if
fom_function is set to None
db_dict : dict (optional)
DSP parameters database. See build_processing_chain for formatting info
verbosity : int (optional)
Verbosity for the processing chain and fom_function calls
Returns:
--------
grid_values : ndarray of floats
An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row
of the grid argument
"""
grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)
iii = grid.get_zero_indices()
if verbosity > 0:
print('Starting grid calculations...')
while True:
grid.set_dsp_pars(dsp_config, iii)
if verbosity > 1:
pprint(dsp_config)
if verbosity > 0:
grid.print_data(iii)
grid_values[tuple(iii)] = run_one_dsp(tb_data, dsp_config, db_dict=
db_dict, fom_function=fom_function, verbosity=verbosity)
if verbosity > 0:
print('Value:', grid_values[tuple(iii)])
if not grid.iterate_indices(iii):
break
return grid_values
<|reserved_special_token_1|>
import numpy as np
from .build_processing_chain import build_processing_chain
from collections import namedtuple
from pprint import pprint
def run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None, verbosity=0):
"""
Run one iteration of DSP on tb_data
Optionally returns a value for optimization
Parameters:
-----------
tb_data : lh5 Table
An input table of lh5 data. Typically a selection is made prior to
sending tb_data to this function: optimization typically doesn't have to
run over all data
dsp_config : dict
Specifies the DSP to be performed for this iteration (see
build_processing_chain()) and the list of output variables to appear in
the output table
db_dict : dict (optional)
DSP parameters database. See build_processing_chain for formatting info
fom_function : function or None (optional)
When given the output lh5 table of this DSP iteration, the
fom_function must return a scalar figure-of-merit value upon which the
optimization will be based. Should accept verbosity as a second argument
verbosity : int (optional)
verbosity for the processing chain and fom_function calls
Returns:
--------
figure_of_merit : float
If fom_function is not None, returns figure-of-merit value for the DSP iteration
tb_out : lh5 Table
If fom_function is None, returns the output lh5 table for the DSP iteration
"""
pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=db_dict, verbosity=verbosity)
pc.execute()
if fom_function is not None: return fom_function(tb_out, verbosity)
else: return tb_out
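
# Illustrative call pattern for run_one_dsp (a sketch only; tb_data, dsp_config
# and my_fom are assumed to be supplied by the caller and are not defined here):
#   tb_out  = run_one_dsp(tb_data, dsp_config)                        # processed lh5 table
#   fom_val = run_one_dsp(tb_data, dsp_config, fom_function=my_fom)   # scalar figure-of-merit
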
ParGridDimension = namedtuple('ParGridDimension', 'name i_arg value_strs companions')
class ParGrid():
""" Parameter Grid class
Each ParGrid entry corresponds to a dsp parameter to be varied.
The ntuples must follow the pattern:
( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )
    where name is the name of the dsp routine in dsp_config that is to be
optimized, i_arg is the index of the argument to be varied, value_strs is
the array of strings to set the argument to, and companions is an optional
list of ( name, i_arg, value_strs ) tuples for companion arguments that
need to change along with this one
Optionally, i_arg can be a list of the argument indices to be varied together,
    where value_strs is a list of lists corresponding to the strings to set the
arguments to in the same order.
"""
def __init__(self):
self.dims = []
def add_dimension(self, name, i_arg, value_strs, companions=None):
self.dims.append( ParGridDimension(name, i_arg, value_strs, companions) )
def get_n_dimensions(self):
return len(self.dims)
def get_n_points_of_dim(self, i):
return len(self.dims[i].value_strs)
def get_shape(self):
shape = ()
for i in range(self.get_n_dimensions()):
shape += (self.get_n_points_of_dim(i),)
return shape
def get_n_grid_points(self):
return np.prod(self.get_shape())
def get_par_meshgrid(self, copy=False, sparse=False):
""" return a meshgrid of parameter values
Always uses Matrix indexing (natural for par grid) so that
mg[i1][i2][...] corresponds to index order in self.dims
Note copy is False by default as opposed to numpy default of True
"""
axes = []
for i in range(self.get_n_dimensions()):
            axes.append(self.dims[i].value_strs)
        return np.meshgrid(*axes, copy=copy, sparse=sparse, indexing='ij')
def get_zero_indices(self):
return np.zeros(self.get_n_dimensions(), dtype=np.uint32)
def iterate_indices(self, indices):
""" iterate given indices [i1, i2, ...] by one.
        For easier iteration. The convention here is arbitrary, but it's the
        order the arrays would be traversed in a series of nested for loops in
        the order appearing in dims (first dimension is first for loop, etc):
Return False when the grid runs out of indices. Otherwise returns True.
"""
for iD in reversed(range(self.get_n_dimensions())):
indices[iD] += 1
if indices[iD] < self.get_n_points_of_dim(iD): return True
indices[iD] = 0
return False
def get_data(self, i_dim, i_par):
name = self.dims[i_dim].name
i_arg = self.dims[i_dim].i_arg
value_str = self.dims[i_dim].value_strs[i_par]
companions = self.dims[i_dim].companions
return name, i_arg, value_str, companions
def print_data(self, indices):
print(f"Grid point at indices {indices}:")
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, _ = self.get_data(i_dim, i_par)
print(f"{name}[{i_arg}] = {value_str}")
def set_dsp_pars(self, dsp_config, indices):
for i_dim, i_par in enumerate(indices):
name, i_arg, value_str, companions = self.get_data(i_dim, i_par)
if dsp_config['processors'][name].get('init_args') is not None:
if np.isscalar(i_arg):
dsp_config['processors'][name]['init_args'][i_arg] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['init_args'][i_arg[i]] = value_str[i]
if companions is None: continue
for ( c_name, c_i_arg, c_value_str ) in companions:
dsp_config['processors'][c_name]['init_args'][c_i_arg] = c_value_str[i_par]
else:
if np.isscalar(i_arg):
dsp_config['processors'][name]['args'][i_arg] = value_str
else:
for i in range(len(i_arg)):
dsp_config['processors'][name]['args'][i_arg[i]] = value_str[i]
if companions is None: continue
for ( c_name, c_i_arg, c_value_str ) in companions:
dsp_config['processors'][c_name]['args'][c_i_arg] = c_value_str[i_par]
def run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64, db_dict=None, verbosity=0):
"""Extract a table of optimization values for a grid of DSP parameters
The grid argument defines a list of parameters and values over which to run
the DSP defined in dsp_config on tb_data. At each point, a scalar
figure-of-merit is extracted
    Returns an N-dimensional ndarray of figure-of-merit values, where the array
axes are in the order they appear in grid.
Parameters:
-----------
tb_data : lh5 Table
An input table of lh5 data. Typically a selection is made prior to
sending tb_data to this function: optimization typically doesn't have to
run over all data
dsp_config : dict
Specifies the DSP to be performed (see build_processing_chain()) and the
list of output variables to appear in the output table for each grid point
grid : ParGrid
See ParGrid class for format
fom_function : function
When given the output lh5 table of this DSP iteration, the fom_function
must return a scalar figure-of-merit. Should accept verbosity as a
second keyword argument
dtype : dtype (optional)
The data type of the fom_function's return object. Should be np.ndarray if
fom_function is set to None
db_dict : dict (optional)
DSP parameters database. See build_processing_chain for formatting info
verbosity : int (optional)
Verbosity for the processing chain and fom_function calls
Returns:
--------
grid_values : ndarray of floats
An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row
of the grid argument
"""
grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)
iii = grid.get_zero_indices()
if verbosity > 0: print("Starting grid calculations...")
while True:
grid.set_dsp_pars(dsp_config, iii)
if verbosity > 1: pprint(dsp_config)
if verbosity > 0: grid.print_data(iii)
grid_values[tuple(iii)] = run_one_dsp(
tb_data, dsp_config, db_dict=db_dict, fom_function=fom_function, verbosity=verbosity)
if verbosity > 0: print('Value:', grid_values[tuple(iii)])
if not grid.iterate_indices(iii): break
return grid_values
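
# ---------------------------------------------------------------------------
# Illustrative end-to-end sketch of a grid scan. The processor name 'pz', the
# argument index 1, the tau value strings and the 'trapEmax' output column are
# assumptions made only for this example; they are not defined by this module
# and should be replaced with names from the user's own dsp_config. tb_data and
# dsp_config are likewise assumed to be loaded elsewhere.
#
#   grid = ParGrid()
#   grid.add_dimension('pz', 1, ['50*us', '100*us', '150*us'])
#
#   def my_fom(tb_out, verbosity=0):
#       energies = tb_out['trapEmax'].nda   # assumed output column / access pattern
#       return float(np.std(energies))      # toy figure-of-merit: smaller is better
#
#   values = run_grid(tb_data, dsp_config, grid, my_fom, verbosity=1)
#   i_best = np.unravel_index(np.argmin(values), values.shape)
#   grid.print_data(i_best)
# ---------------------------------------------------------------------------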
|
flexible
|
{
"blob_id": "efe2d6f5da36679b77de32d631cca50c2c1dd29e",
"index": 5170,
"step-1": "<mask token>\n\n\nclass ParGrid:\n <mask token>\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n <mask token>\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n <mask token>\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ParGrid:\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. 
Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD):\n return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None,\n verbosity=0):\n \"\"\"\n Run one iteration of DSP on tb_data \n\n Optionally returns a value for optimization\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed for this iteration (see\n build_processing_chain()) and the list of output variables to appear in\n the output table\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n fom_function : function or None (optional)\n When given the output lh5 table of this DSP iteration, the\n fom_function must return a scalar figure-of-merit value upon which the\n optimization will be based. Should accept verbosity as a second argument\n verbosity : int (optional)\n verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n figure_of_merit : float\n If fom_function is not None, returns figure-of-merit value for the DSP iteration\n tb_out : lh5 Table\n If fom_function is None, returns the output lh5 table for the DSP iteration\n \"\"\"\n pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=\n db_dict, verbosity=verbosity)\n pc.execute()\n if fom_function is not None:\n return fom_function(tb_out, verbosity)\n else:\n return tb_out\n\n\n<mask token>\n\n\nclass ParGrid:\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. 
The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD):\n return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\ndef run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64,\n db_dict=None, verbosity=0):\n \"\"\"Extract a table of optimization values for a grid of DSP parameters \n\n The grid argument defines a list of parameters and values over which to run\n the DSP defined in dsp_config on tb_data. At each point, a scalar\n figure-of-merit is extracted\n\n Returns a N-dimensional ndarray of figure-of-merit values, where the array\n axes are in the order they appear in grid.\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed (see build_processing_chain()) and the\n list of output variables to appear in the output table for each grid point\n grid : ParGrid\n See ParGrid class for format\n fom_function : function \n When given the output lh5 table of this DSP iteration, the fom_function\n must return a scalar figure-of-merit. Should accept verbosity as a\n second keyword argument\n dtype : dtype (optional)\n The data type of the fom_function's return object. Should be np.ndarray if\n fom_function is set to None\n db_dict : dict (optional)\n DSP parameters database. 
See build_processing_chain for formatting info\n verbosity : int (optional)\n Verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n grid_values : ndarray of floats\n An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row\n of the grid argument\n \"\"\"\n grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)\n iii = grid.get_zero_indices()\n if verbosity > 0:\n print('Starting grid calculations...')\n while True:\n grid.set_dsp_pars(dsp_config, iii)\n if verbosity > 1:\n pprint(dsp_config)\n if verbosity > 0:\n grid.print_data(iii)\n grid_values[tuple(iii)] = run_one_dsp(tb_data, dsp_config, db_dict=\n db_dict, fom_function=fom_function, verbosity=verbosity)\n if verbosity > 0:\n print('Value:', grid_values[tuple(iii)])\n if not grid.iterate_indices(iii):\n break\n return grid_values\n",
"step-4": "import numpy as np\nfrom .build_processing_chain import build_processing_chain\nfrom collections import namedtuple\nfrom pprint import pprint\n\n\ndef run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None,\n verbosity=0):\n \"\"\"\n Run one iteration of DSP on tb_data \n\n Optionally returns a value for optimization\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed for this iteration (see\n build_processing_chain()) and the list of output variables to appear in\n the output table\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n fom_function : function or None (optional)\n When given the output lh5 table of this DSP iteration, the\n fom_function must return a scalar figure-of-merit value upon which the\n optimization will be based. Should accept verbosity as a second argument\n verbosity : int (optional)\n verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n figure_of_merit : float\n If fom_function is not None, returns figure-of-merit value for the DSP iteration\n tb_out : lh5 Table\n If fom_function is None, returns the output lh5 table for the DSP iteration\n \"\"\"\n pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=\n db_dict, verbosity=verbosity)\n pc.execute()\n if fom_function is not None:\n return fom_function(tb_out, verbosity)\n else:\n return tb_out\n\n\nParGridDimension = namedtuple('ParGridDimension',\n 'name i_arg value_strs companions')\n\n\nclass ParGrid:\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append(ParGridDimension(name, i_arg, value_strs, companions))\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += self.get_n_points_of_dim(i),\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] 
corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\"\n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD):\n return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f'Grid point at indices {indices}:')\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f'{name}[{i_arg}] = {value_str}')\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg\n ] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg\n ] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]\n ] = value_str[i]\n if companions is None:\n continue\n for c_name, c_i_arg, c_value_str in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg\n ] = c_value_str[i_par]\n\n\ndef run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64,\n db_dict=None, verbosity=0):\n \"\"\"Extract a table of optimization values for a grid of DSP parameters \n\n The grid argument defines a list of parameters and values over which to run\n the DSP defined in dsp_config on tb_data. At each point, a scalar\n figure-of-merit is extracted\n\n Returns a N-dimensional ndarray of figure-of-merit values, where the array\n axes are in the order they appear in grid.\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed (see build_processing_chain()) and the\n list of output variables to appear in the output table for each grid point\n grid : ParGrid\n See ParGrid class for format\n fom_function : function \n When given the output lh5 table of this DSP iteration, the fom_function\n must return a scalar figure-of-merit. Should accept verbosity as a\n second keyword argument\n dtype : dtype (optional)\n The data type of the fom_function's return object. 
Should be np.ndarray if\n fom_function is set to None\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n verbosity : int (optional)\n Verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n grid_values : ndarray of floats\n An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row\n of the grid argument\n \"\"\"\n grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)\n iii = grid.get_zero_indices()\n if verbosity > 0:\n print('Starting grid calculations...')\n while True:\n grid.set_dsp_pars(dsp_config, iii)\n if verbosity > 1:\n pprint(dsp_config)\n if verbosity > 0:\n grid.print_data(iii)\n grid_values[tuple(iii)] = run_one_dsp(tb_data, dsp_config, db_dict=\n db_dict, fom_function=fom_function, verbosity=verbosity)\n if verbosity > 0:\n print('Value:', grid_values[tuple(iii)])\n if not grid.iterate_indices(iii):\n break\n return grid_values\n",
"step-5": "import numpy as np\nfrom .build_processing_chain import build_processing_chain\nfrom collections import namedtuple\nfrom pprint import pprint\n\n\ndef run_one_dsp(tb_data, dsp_config, db_dict=None, fom_function=None, verbosity=0):\n \"\"\"\n Run one iteration of DSP on tb_data \n\n Optionally returns a value for optimization\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed for this iteration (see\n build_processing_chain()) and the list of output variables to appear in\n the output table\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n fom_function : function or None (optional)\n When given the output lh5 table of this DSP iteration, the\n fom_function must return a scalar figure-of-merit value upon which the\n optimization will be based. Should accept verbosity as a second argument\n verbosity : int (optional)\n verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n figure_of_merit : float\n If fom_function is not None, returns figure-of-merit value for the DSP iteration\n tb_out : lh5 Table\n If fom_function is None, returns the output lh5 table for the DSP iteration\n \"\"\"\n \n pc, _, tb_out = build_processing_chain(tb_data, dsp_config, db_dict=db_dict, verbosity=verbosity)\n pc.execute()\n if fom_function is not None: return fom_function(tb_out, verbosity)\n else: return tb_out\n\n\n\nParGridDimension = namedtuple('ParGridDimension', 'name i_arg value_strs companions')\n\nclass ParGrid():\n \"\"\" Parameter Grid class\n\n Each ParGrid entry corresponds to a dsp parameter to be varied.\n The ntuples must follow the pattern: \n ( name, i_arg, value_strs, companions) : ( str, int, list of str, list, or None )\n where name is the name of the dsp routine in dsp_config whose to be\n optimized, i_arg is the index of the argument to be varied, value_strs is\n the array of strings to set the argument to, and companions is an optional\n list of ( name, i_arg, value_strs ) tuples for companion arguments that\n need to change along with this one\n \n Optionally, i_arg can be a list of the argument indices to be varied together,\n where value_strs is a list of lists correponding to the strings to set the\n arguments to in the same order.\n \"\"\"\n def __init__(self):\n self.dims = []\n\n def add_dimension(self, name, i_arg, value_strs, companions=None):\n self.dims.append( ParGridDimension(name, i_arg, value_strs, companions) )\n\n def get_n_dimensions(self):\n return len(self.dims)\n\n def get_n_points_of_dim(self, i):\n return len(self.dims[i].value_strs)\n\n def get_shape(self):\n shape = ()\n for i in range(self.get_n_dimensions()):\n shape += (self.get_n_points_of_dim(i),)\n return shape\n\n def get_n_grid_points(self):\n return np.prod(self.get_shape())\n\n def get_par_meshgrid(self, copy=False, sparse=False):\n \"\"\" return a meshgrid of parameter values\n\n Always uses Matrix indexing (natural for par grid) so that\n mg[i1][i2][...] 
corresponds to index order in self.dims\n\n Note copy is False by default as opposed to numpy default of True\n \"\"\" \n axes = []\n for i in range(self.get_n_dimensions()):\n axes.append(self.dims[i].values_strs)\n return np.meshgrid(*axes, copy, sparse, indexing='ij')\n\n def get_zero_indices(self):\n return np.zeros(self.get_n_dimensions(), dtype=np.uint32)\n\n def iterate_indices(self, indices):\n \"\"\" iterate given indices [i1, i2, ...] by one.\n\n For easier iteration. The convention here is arbitrary, but its the\n order the arrays would be traversed in a series of nested for loops in\n the order appearin in dims (first dimension is first for loop, etc):\n\n Return False when the grid runs out of indices. Otherwise returns True.\n \"\"\"\n for iD in reversed(range(self.get_n_dimensions())):\n indices[iD] += 1\n if indices[iD] < self.get_n_points_of_dim(iD): return True\n indices[iD] = 0\n return False\n\n def get_data(self, i_dim, i_par):\n name = self.dims[i_dim].name\n i_arg = self.dims[i_dim].i_arg\n value_str = self.dims[i_dim].value_strs[i_par]\n companions = self.dims[i_dim].companions\n return name, i_arg, value_str, companions\n\n def print_data(self, indices):\n print(f\"Grid point at indices {indices}:\")\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, _ = self.get_data(i_dim, i_par)\n print(f\"{name}[{i_arg}] = {value_str}\")\n\n def set_dsp_pars(self, dsp_config, indices):\n for i_dim, i_par in enumerate(indices):\n name, i_arg, value_str, companions = self.get_data(i_dim, i_par)\n if dsp_config['processors'][name].get('init_args') is not None:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['init_args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['init_args'][i_arg[i]] = value_str[i]\n if companions is None: continue\n for ( c_name, c_i_arg, c_value_str ) in companions:\n dsp_config['processors'][c_name]['init_args'][c_i_arg] = c_value_str[i_par]\n else:\n if np.isscalar(i_arg):\n dsp_config['processors'][name]['args'][i_arg] = value_str\n else:\n for i in range(len(i_arg)):\n dsp_config['processors'][name]['args'][i_arg[i]] = value_str[i]\n if companions is None: continue\n for ( c_name, c_i_arg, c_value_str ) in companions:\n dsp_config['processors'][c_name]['args'][c_i_arg] = c_value_str[i_par]\n\ndef run_grid(tb_data, dsp_config, grid, fom_function, dtype=np.float64, db_dict=None, verbosity=0):\n \"\"\"Extract a table of optimization values for a grid of DSP parameters \n\n The grid argument defines a list of parameters and values over which to run\n the DSP defined in dsp_config on tb_data. At each point, a scalar\n figure-of-merit is extracted\n\n Returns a N-dimensional ndarray of figure-of-merit values, where the array\n axes are in the order they appear in grid.\n\n Parameters:\n -----------\n tb_data : lh5 Table\n An input table of lh5 data. Typically a selection is made prior to\n sending tb_data to this function: optimization typically doesn't have to\n run over all data\n dsp_config : dict\n Specifies the DSP to be performed (see build_processing_chain()) and the\n list of output variables to appear in the output table for each grid point\n grid : ParGrid\n See ParGrid class for format\n fom_function : function \n When given the output lh5 table of this DSP iteration, the fom_function\n must return a scalar figure-of-merit. Should accept verbosity as a\n second keyword argument\n dtype : dtype (optional)\n The data type of the fom_function's return object. 
Should be np.ndarray if\n fom_function is set to None\n db_dict : dict (optional)\n DSP parameters database. See build_processing_chain for formatting info\n verbosity : int (optional)\n Verbosity for the processing chain and fom_function calls\n\n Returns:\n --------\n grid_values : ndarray of floats\n An N-dimensional numpy ndarray whose Mth axis corresponds to the Mth row\n of the grid argument\n \"\"\"\n\n grid_values = np.ndarray(shape=grid.get_shape(), dtype=dtype)\n iii = grid.get_zero_indices()\n if verbosity > 0: print(\"Starting grid calculations...\")\n while True: \n grid.set_dsp_pars(dsp_config, iii)\n if verbosity > 1: pprint(dsp_config)\n if verbosity > 0: grid.print_data(iii)\n grid_values[tuple(iii)] = run_one_dsp(\n tb_data, dsp_config, db_dict=db_dict, fom_function=fom_function, verbosity=verbosity)\n if verbosity > 0: print('Value:', grid_values[tuple(iii)])\n if not grid.iterate_indices(iii): break\n return grid_values\n \n",
"step-ids": [
11,
14,
16,
18,
19
]
}
|
[
11,
14,
16,
18,
19
] |
numbers = [3, 7, 5]
maxNumber = 0
for number in numbers:
if maxNumber < number:
maxNumber = number
print(maxNumber)
|
normal
|
{
"blob_id": "2d9d66ea8a95285744b797570bfbeaa17fdc922a",
"index": 4036,
"step-1": "numbers = [3, 7, 5]\nmaxNumber = 0\nfor number in numbers:\n if maxNumber < number:\n maxNumber = number\n\nprint maxNumber",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import pandas as pd
from sqlalchemy import create_engine
# file = 'testfile.csv'
# print(pd.read_csv(file, nrows=5))
with open('testfile_short1.csv', 'r') as original: data = original.read()
for i in range(2):
with open('testfile_short3.csv', 'a') as modified: modified.write(data)
|
normal
|
{
"blob_id": "d7b45e76f150107cd62be160e8938f17dad90623",
"index": 58,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('testfile_short1.csv', 'r') as original:\n data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified:\n modified.write(data)\n",
"step-3": "import pandas as pd\nfrom sqlalchemy import create_engine\nwith open('testfile_short1.csv', 'r') as original:\n data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified:\n modified.write(data)\n",
"step-4": "import pandas as pd\nfrom sqlalchemy import create_engine\n# file = 'testfile.csv'\n\n# print(pd.read_csv(file, nrows=5))\n\nwith open('testfile_short1.csv', 'r') as original: data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified: modified.write(data)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
regr.fit(X_train, y_train)
print(regr.score(X_test, y_test))
<|reserved_special_token_0|>
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test,
y_pred)))
plt.scatter(X_test, y_test, color='b')
plt.plot(X_test, y_pred, color='k')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
dataset = pd.read_csv('./dataset/datafile.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
X = np.array(X).reshape(-1, 1)
y = np.array(y).reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
regr = LinearRegression()
regr.fit(X_train, y_train)
print(regr.score(X_test, y_test))
y_pred = regr.predict(X_test)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test,
y_pred)))
plt.scatter(X_test, y_test, color='b')
plt.plot(X_test, y_pred, color='k')
plt.show()
<|reserved_special_token_1|>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
dataset = pd.read_csv('./dataset/datafile.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
X = np.array(X).reshape(-1, 1)
y = np.array(X).reshape(-1, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
regr = LinearRegression()
regr.fit(X_train, y_train)
print(regr.score(X_test, y_test))
y_pred = regr.predict(X_test)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test,
y_pred)))
plt.scatter(X_test, y_test, color='b')
plt.plot(X_test, y_pred, color='k')
plt.show()
<|reserved_special_token_1|>
#Testing Operating System Descriptions
#OS : LMDE 4 Debbie
#Version: 4.6.7
#Kernal Version : 4.19.0-8-amd64
#Scripting Langages : Python3
#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Libraries used
#pandas is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,
import pandas as pd
#NumPy is a Python library used for working with arrays.
#NumPy aims to provide an array object that is up to 50x faster than traditional Python lists.Arrays are very frequently used in data science, where speed and resources are very important.
import numpy as np
#Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations in Python
#matplotlib.pyplot is a state-based interface to matplotlib. It provides a MATLAB-like way of plotting.
#pyplot is mainly intended for interactive plots and simple cases of programmatic plot generation
import matplotlib.pyplot as plt
#Simple and efficient tools for predictive data analysis. Built on NumPy, SciPy, and matplotlib. Scikit-learn is probably the most useful library for machine learning in Python.
#The sklearn library contains a lot of efficient tools for machine learning and statistical modeling including classification, regression, clustering and dimensionality reduction.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
#-----------------------------------------------------------------------------------------------------------------------------
#Reading the dataset
dataset = pd.read_csv('./dataset/datafile.csv')
#----------------------------------------------------------------------------------------------------------------------------
#Extracting X and Y values
#Preparing the Data
#divide the data into "attributes" and "labels". Attributes are the independent variables while labels are dependent variables whose values are to be predicted
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 1].values
#------------------------------------------------------------------------------------------------------------------------------
#converting to arrays of data
X = np.array(X).reshape(-1, 1)
y = np.array(X).reshape(-1, 1)
#-----------------------------------------------------------------------------------------------------------------------------
#we have our attributes and labels, the next step is to split this data into training and test sets. We'll do this by using Scikit-Learn's built-in train_test_split()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
#The above script splits 80% of the data to training set while 20% of the data to test set. The test_size variable is where we actually specify the proportion of test set.
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Training the Algorithm
regr = LinearRegression()
regr.fit(X_train, y_train)
print(regr.score(X_test, y_test))
#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Making Predictions
y_pred = regr.predict(X_test)
#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Evaluating the Algorithm
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Plotting the scatter
plt.scatter(X_test, y_test, color ='b')
plt.plot(X_test, y_pred, color ='k')
plt.show()
|
flexible
|
{
"blob_id": "422491852b80c2fc4a2e73c01fd01acaad4cf9c8",
"index": 7573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nregr.fit(X_train, y_train)\nprint(regr.score(X_test, y_test))\n<mask token>\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))\nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test,\n y_pred)))\nplt.scatter(X_test, y_test, color='b')\nplt.plot(X_test, y_pred, color='k')\nplt.show()\n",
"step-3": "<mask token>\ndataset = pd.read_csv('./dataset/datafile.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 1].values\nX = np.array(X).reshape(-1, 1)\ny = np.array(X).reshape(-1, 1)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nregr = LinearRegression()\nregr.fit(X_train, y_train)\nprint(regr.score(X_test, y_test))\ny_pred = regr.predict(X_test)\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))\nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test,\n y_pred)))\nplt.scatter(X_test, y_test, color='b')\nplt.plot(X_test, y_pred, color='k')\nplt.show()\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\ndataset = pd.read_csv('./dataset/datafile.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 1].values\nX = np.array(X).reshape(-1, 1)\ny = np.array(X).reshape(-1, 1)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)\nregr = LinearRegression()\nregr.fit(X_train, y_train)\nprint(regr.score(X_test, y_test))\ny_pred = regr.predict(X_test)\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))\nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test,\n y_pred)))\nplt.scatter(X_test, y_test, color='b')\nplt.plot(X_test, y_pred, color='k')\nplt.show()\n",
"step-5": "#Testing Operating System Descriptions\n\n#OS : LMDE 4 Debbie\n#Version: 4.6.7\n#Kernal Version : 4.19.0-8-amd64\n\n#Scripting Langages : Python3\n\n#----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#Libraries used\n\n#pandas is a fast, powerful, flexible and easy to use open source data analysis and manipulation tool,\nimport pandas as pd\n\n#NumPy is a Python library used for working with arrays.\n#NumPy aims to provide an array object that is up to 50x faster than traditional Python lists.Arrays are very frequently used in data science, where speed and resources are very important.\nimport numpy as np\n\n#Matplotlib is a comprehensive library for creating static, animated, and interactive visualizations in Python\n#matplotlib.pyplot is a state-based interface to matplotlib. It provides a MATLAB-like way of plotting.\n#pyplot is mainly intended for interactive plots and simple cases of programmatic plot generation\nimport matplotlib.pyplot as plt\n\n#Simple and efficient tools for predictive data analysis. Built on NumPy, SciPy, and matplotlib. Scikit-learn is probably the most useful library for machine learning in Python. \n#The sklearn library contains a lot of efficient tools for machine learning and statistical modeling including classification, regression, clustering and dimensionality reduction.\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\n\n#-----------------------------------------------------------------------------------------------------------------------------\n#Reading the dataset\ndataset = pd.read_csv('./dataset/datafile.csv')\n\n#----------------------------------------------------------------------------------------------------------------------------\n#Extracting X and Y values\n#Preparing the Data\n#divide the data into \"attributes\" and \"labels\". Attributes are the independent variables while labels are dependent variables whose values are to be predicted\n\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, 1].values\n\n#------------------------------------------------------------------------------------------------------------------------------\n#converting to arrays of data\nX = np.array(X).reshape(-1, 1) \ny = np.array(X).reshape(-1, 1)\n\n#-----------------------------------------------------------------------------------------------------------------------------\n#we have our attributes and labels, the next step is to split this data into training and test sets. We'll do this by using Scikit-Learn's built-in train_test_split()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2) \n\n#The above script splits 80% of the data to training set while 20% of the data to test set. 
The test_size variable is where we actually specify the proportion of test set.\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n#Training the Algorithm\n\nregr = LinearRegression() \nregr.fit(X_train, y_train) \nprint(regr.score(X_test, y_test)) \n\n#---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#Making Predictions\n\ny_pred = regr.predict(X_test) \n\n#-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#Evaluating the Algorithm\nprint('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))\nprint('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))\nprint('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n\n#------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n#Plotting the scatter\nplt.scatter(X_test, y_test, color ='b') \nplt.plot(X_test, y_pred, color ='k') \nplt.show()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def diff_in_date(first, second):
value = str(second - first)
if value.__contains__(','):
generated_sum = value.split(',')
return generated_sum[0]
else:
return value
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def diff_in_date(first, second):
value = str(second - first)
if value.__contains__(','):
generated_sum = value.split(',')
return generated_sum[0]
else:
return value
<|reserved_special_token_0|>
print(val)
<|reserved_special_token_0|>
print(newVal)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def diff_in_date(first, second):
value = str(second - first)
if value.__contains__(','):
generated_sum = value.split(',')
return generated_sum[0]
else:
return value
first_date = date(2014, 7, 2)
second_date = date(2014, 7, 11)
current_date = date.today()
val = diff_in_date(first_date, second_date)
print(val)
newVal = diff_in_date(second_date, current_date)
print(newVal)
<|reserved_special_token_1|>
from datetime import date
def diff_in_date(first, second):
value = str(second - first)
if value.__contains__(','):
generated_sum = value.split(',')
return generated_sum[0]
else:
return value
first_date = date(2014, 7, 2)
second_date = date(2014, 7, 11)
current_date = date.today()
val = diff_in_date(first_date, second_date)
print(val)
newVal = diff_in_date(second_date, current_date)
print(newVal)
|
flexible
|
{
"blob_id": "9b6d30a40bafa0e9e4760843d6a2f750f0f88a57",
"index": 6106,
"step-1": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\n<mask token>\nprint(val)\n<mask token>\nprint(newVal)\n",
"step-3": "<mask token>\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\nfirst_date = date(2014, 7, 2)\nsecond_date = date(2014, 7, 11)\ncurrent_date = date.today()\nval = diff_in_date(first_date, second_date)\nprint(val)\nnewVal = diff_in_date(second_date, current_date)\nprint(newVal)\n",
"step-4": "from datetime import date\n\n\ndef diff_in_date(first, second):\n value = str(second - first)\n if value.__contains__(','):\n generated_sum = value.split(',')\n return generated_sum[0]\n else:\n return value\n\n\nfirst_date = date(2014, 7, 2)\nsecond_date = date(2014, 7, 11)\ncurrent_date = date.today()\nval = diff_in_date(first_date, second_date)\nprint(val)\nnewVal = diff_in_date(second_date, current_date)\nprint(newVal)\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
#
# Author:: Noah Kantrowitz <[email protected]>
#
# Copyright 2014, Noah Kantrowitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from fabric.api import task, roles
import pytest
from fabric_rundeck import visitor
def fixture_path(*path):
return os.path.join(os.path.dirname(__file__), 'data', *path)
class TestUnwrap(object):
@pytest.fixture
def fn(self):
def fn():
pass
return fn
def test_fn(self, fn):
assert visitor.unwrap(fn) is fn
def test_task(self, fn):
t = task(fn)
assert visitor.unwrap(t) is fn
def test_taskcall(self, fn):
t = task()(fn)
assert visitor.unwrap(t) is fn
def test_task_roles(self, fn):
t = task(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_taskcall_roles(self, fn):
t = task()(roles('foo')(fn))
assert visitor.unwrap(t) is fn
def test_roles_task(self, fn):
t = roles('foo')(task(fn))
assert visitor.unwrap(t) is fn
def test_roles_taskcall(self, fn):
t = roles('foo')(task()(fn))
assert visitor.unwrap(t) is fn
def test_lambda(self):
fn = lambda: None
assert visitor.unwrap(fn) is fn
def test_lambda_task(self):
fn = lambda: None
t = task(fn)
assert visitor.unwrap(t) is fn
class TestVisitTask(object):
def test_no_args(self):
def fn():
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': [],
'varargs': None,
'keywords': None,
'defaults': None,
},
}
def test_simple_args(self):
def fn(a, b):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': ['a', 'b'],
'varargs': None,
'keywords': None,
'defaults': None,
},
}
def test_arg_defaults(self):
def fn(a, b=1, c=None):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': ['a', 'b', 'c'],
'varargs': None,
'keywords': None,
'defaults': (1, None),
},
}
def test_varargs(self):
def fn(*args, **kwargs):
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': None,
'cron': None,
'argspec': {
'args': [],
'varargs': 'args',
'keywords': 'kwargs',
'defaults': None,
},
}
def test_docs(self):
def fn(*args, **kwargs):
"""I am a teapot."""
pass
assert visitor.visit_task(fn, ()) == {
'name': 'fn',
'path': (),
'doc': 'I am a teapot.',
'cron': None,
'argspec': {
'args': [],
'varargs': 'args',
'keywords': 'kwargs',
'defaults': None,
},
}
class TestVisit(object):
def test_single(self):
def fn():
pass
callables = {
'fn': fn,
}
data = visitor.visit(callables)
assert len(data) == 1
assert data[0]['name'] == 'fn'
def test_multi(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {
'fn': fn,
'fn2': fn2,
'fn3': fn3,
}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[1]['name'] == 'fn2'
assert data[2]['name'] == 'fn3'
def test_nested(self):
def fn():
pass
def fn2():
pass
def fn3():
pass
callables = {
'fn': fn,
'mod': {
'fn2': fn2,
'fn3': fn3,
}
}
data = visitor.visit(callables)
assert len(data) == 3
assert data[0]['name'] == 'fn'
assert data[0]['path'] == ()
assert data[1]['name'] == 'fn2'
assert data[1]['path'] == ('mod',)
assert data[2]['name'] == 'fn3'
assert data[2]['path'] == ('mod',)
class TestVisitFabfile(object):
def test_one(self):
data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))
assert len(data) == 3
|
normal
|
{
"blob_id": "a1e563f94044ff7cd7e0e55542bc4ca2db81df28",
"index": 9749,
"step-1": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-2": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n <mask token>\n <mask token>\n <mask token>\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-3": "<mask token>\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n <mask token>\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n <mask token>\n <mask token>\n\n def test_lambda_task(self):\n fn = lambda : None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-4": "import os\nfrom fabric.api import task, roles\nimport pytest\nfrom fabric_rundeck import visitor\n\n\ndef fixture_path(*path):\n return os.path.join(os.path.dirname(__file__), 'data', *path)\n\n\nclass TestUnwrap(object):\n\n @pytest.fixture\n def fn(self):\n\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n def test_taskcall(self, fn):\n t = task()(fn)\n assert visitor.unwrap(t) is fn\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_taskcall(self, fn):\n t = roles('foo')(task()(fn))\n assert visitor.unwrap(t) is fn\n\n def test_lambda(self):\n fn = lambda : None\n assert visitor.unwrap(fn) is fn\n\n def test_lambda_task(self):\n fn = lambda : None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n\n def test_no_args(self):\n\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n None, 'keywords': None, 'defaults': None}}\n\n def test_simple_args(self):\n\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b'],\n 'varargs': None, 'keywords': None, 'defaults': None}}\n\n def test_arg_defaults(self):\n\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': ['a', 'b', 'c'],\n 'varargs': None, 'keywords': None, 'defaults': (1, None)}}\n\n def test_varargs(self):\n\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': None, 'cron': None, 'argspec': {'args': [], 'varargs':\n 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n def test_docs(self):\n\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {'name': 'fn', 'path': (),\n 'doc': 'I am a teapot.', 'cron': None, 'argspec': {'args': [],\n 'varargs': 'args', 'keywords': 'kwargs', 'defaults': None}}\n\n\nclass TestVisit(object):\n\n def test_single(self):\n\n def fn():\n pass\n callables = {'fn': fn}\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'fn2': fn2, 'fn3': fn3}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n\n def fn():\n pass\n\n def fn2():\n pass\n\n def fn3():\n pass\n callables = {'fn': fn, 'mod': {'fn2': fn2, 'fn3': fn3}}\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-5": "#\n# Author:: Noah Kantrowitz <[email protected]>\n#\n# Copyright 2014, Noah Kantrowitz\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\n\nfrom fabric.api import task, roles\nimport pytest\n\nfrom fabric_rundeck import visitor\n\n\ndef fixture_path(*path):\n return os.path.join(os.path.dirname(__file__), 'data', *path)\n\n\nclass TestUnwrap(object):\n @pytest.fixture\n def fn(self):\n def fn():\n pass\n return fn\n\n def test_fn(self, fn):\n assert visitor.unwrap(fn) is fn\n\n def test_task(self, fn):\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n def test_taskcall(self, fn):\n t = task()(fn)\n assert visitor.unwrap(t) is fn\n\n def test_task_roles(self, fn):\n t = task(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_taskcall_roles(self, fn):\n t = task()(roles('foo')(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_task(self, fn):\n t = roles('foo')(task(fn))\n assert visitor.unwrap(t) is fn\n\n def test_roles_taskcall(self, fn):\n t = roles('foo')(task()(fn))\n assert visitor.unwrap(t) is fn\n\n def test_lambda(self):\n fn = lambda: None\n assert visitor.unwrap(fn) is fn\n\n def test_lambda_task(self):\n fn = lambda: None\n t = task(fn)\n assert visitor.unwrap(t) is fn\n\n\nclass TestVisitTask(object):\n def test_no_args(self):\n def fn():\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': None,\n 'keywords': None,\n 'defaults': None,\n },\n }\n\n def test_simple_args(self):\n def fn(a, b):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': ['a', 'b'],\n 'varargs': None,\n 'keywords': None,\n 'defaults': None,\n },\n }\n\n def test_arg_defaults(self):\n def fn(a, b=1, c=None):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': ['a', 'b', 'c'],\n 'varargs': None,\n 'keywords': None,\n 'defaults': (1, None),\n },\n }\n\n def test_varargs(self):\n def fn(*args, **kwargs):\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': None,\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': 'args',\n 'keywords': 'kwargs',\n 'defaults': None,\n },\n }\n\n def test_docs(self):\n def fn(*args, **kwargs):\n \"\"\"I am a teapot.\"\"\"\n pass\n assert visitor.visit_task(fn, ()) == {\n 'name': 'fn',\n 'path': (),\n 'doc': 'I am a teapot.',\n 'cron': None,\n 'argspec': {\n 'args': [],\n 'varargs': 'args',\n 'keywords': 'kwargs',\n 'defaults': None,\n },\n }\n\n\nclass TestVisit(object):\n def test_single(self):\n def fn():\n pass\n callables = {\n 'fn': fn,\n }\n data = visitor.visit(callables)\n assert len(data) == 1\n assert data[0]['name'] == 'fn'\n\n def test_multi(self):\n def fn():\n pass\n def fn2():\n pass\n def fn3():\n pass\n callables = {\n 'fn': fn,\n 'fn2': fn2,\n 'fn3': fn3,\n }\n data = visitor.visit(callables)\n assert len(data) == 3\n 
assert data[0]['name'] == 'fn'\n assert data[1]['name'] == 'fn2'\n assert data[2]['name'] == 'fn3'\n\n def test_nested(self):\n def fn():\n pass\n def fn2():\n pass\n def fn3():\n pass\n callables = {\n 'fn': fn,\n 'mod': {\n 'fn2': fn2,\n 'fn3': fn3,\n }\n }\n data = visitor.visit(callables)\n assert len(data) == 3\n assert data[0]['name'] == 'fn'\n assert data[0]['path'] == ()\n assert data[1]['name'] == 'fn2'\n assert data[1]['path'] == ('mod',)\n assert data[2]['name'] == 'fn3'\n assert data[2]['path'] == ('mod',)\n\n\nclass TestVisitFabfile(object):\n def test_one(self):\n data = visitor.visit_fabfile(fixture_path('fabfile_one.py'))\n assert len(data) == 3\n",
"step-ids": [
14,
15,
20,
25,
26
]
}
|
[
14,
15,
20,
25,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def download(request):
if request.method == 'GET':
session = request.GET['session']
title = request.GET['download_title']
download_quality = request.GET['download_quality']
file_url = download_function.download_generator(session,
download_quality, title)
return HttpResponse(file_url)
<|reserved_special_token_1|>
from django.shortcuts import render, redirect
from . import download_function
from django.http import HttpResponse
def download(request):
if request.method == 'GET':
session = request.GET['session']
title = request.GET['download_title']
download_quality = request.GET['download_quality']
file_url = download_function.download_generator(session,
download_quality, title)
return HttpResponse(file_url)
<|reserved_special_token_1|>
from django.shortcuts import render,redirect
from . import download_function
from django.http import HttpResponse
# Create your views here.
def download(request):
if request.method == "GET":
session = request.GET['session']
title = request.GET['download_title']
download_quality = request.GET['download_quality']
file_url = download_function.download_generator(session,download_quality,title)
return HttpResponse(file_url)
|
flexible
|
{
"blob_id": "339506777f5471ec99b39c67c28df8ec3d06ce19",
"index": 3084,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download(request):\n if request.method == 'GET':\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n file_url = download_function.download_generator(session,\n download_quality, title)\n return HttpResponse(file_url)\n",
"step-3": "from django.shortcuts import render, redirect\nfrom . import download_function\nfrom django.http import HttpResponse\n\n\ndef download(request):\n if request.method == 'GET':\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n file_url = download_function.download_generator(session,\n download_quality, title)\n return HttpResponse(file_url)\n",
"step-4": "from django.shortcuts import render,redirect\nfrom . import download_function\nfrom django.http import HttpResponse\n# Create your views here.\ndef download(request):\n if request.method == \"GET\":\n session = request.GET['session']\n title = request.GET['download_title']\n download_quality = request.GET['download_quality']\n\n file_url = download_function.download_generator(session,download_quality,title)\n return HttpResponse(file_url)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
SECRET_KEY = env('DJANGO_SECRET_KEY')
ALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]
ADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]
DATABASES['default'] = env.db('DATABASE_URL')
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)
CACHES = {'default': {'BACKEND':
'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': ''}}
SECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF',
default=True)
<|reserved_special_token_1|>
from .base import *
from .base import env
SECRET_KEY = env('DJANGO_SECRET_KEY')
ALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]
ADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]
DATABASES['default'] = env.db('DATABASE_URL')
DATABASES['default']['ATOMIC_REQUESTS'] = True
DATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)
CACHES = {'default': {'BACKEND':
'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': ''}}
SECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF',
default=True)
<|reserved_special_token_1|>
from .base import * # noqa
from .base import env
# exemple https://github.com/pydanny/cookiecutter-django/blob/master/%7B%7Bcookiecutter.project_slug%7D%7D/config/settings/production.py
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
# Who to sent emails when errors arise
ADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
# avec cpanel à voir ce que l'on peut configurer avec xtremcache/varnish et django-varnish
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
},
# "default": {
# "BACKEND": "django_redis.cache.RedisCache",
# "LOCATION": env("XTREM_CACHE_URL"),
# "OPTIONS": {
# "CLIENT_CLASS": "django_redis.client.DefaultClient",
# # Mimicing memcache behavior.
# # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior
# "IGNORE_EXCEPTIONS": True,
# },
# }
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
|
flexible
|
{
"blob_id": "836df02495ee581f138050be6b7a7a076ea899eb",
"index": 4966,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nSECRET_KEY = env('DJANGO_SECRET_KEY')\nALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]\nADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]\nDATABASES['default'] = env.db('DATABASE_URL')\nDATABASES['default']['ATOMIC_REQUESTS'] = True\nDATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)\nCACHES = {'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': ''}}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nSECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_HSTS_SECONDS = 60\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\n 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)\nSECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF',\n default=True)\n",
"step-3": "from .base import *\nfrom .base import env\nSECRET_KEY = env('DJANGO_SECRET_KEY')\nALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]\nADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]\nDATABASES['default'] = env.db('DATABASE_URL')\nDATABASES['default']['ATOMIC_REQUESTS'] = True\nDATABASES['default']['CONN_MAX_AGE'] = env.int('CONN_MAX_AGE', default=60)\nCACHES = {'default': {'BACKEND':\n 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': ''}}\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nSECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)\nSESSION_COOKIE_SECURE = True\nCSRF_COOKIE_SECURE = True\nSECURE_HSTS_SECONDS = 60\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\n 'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)\nSECURE_HSTS_PRELOAD = env.bool('DJANGO_SECURE_HSTS_PRELOAD', default=True)\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool('DJANGO_SECURE_CONTENT_TYPE_NOSNIFF',\n default=True)\n",
"step-4": "from .base import * # noqa\nfrom .base import env\n\n# exemple https://github.com/pydanny/cookiecutter-django/blob/master/%7B%7Bcookiecutter.project_slug%7D%7D/config/settings/production.py\n\n# GENERAL\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts\nALLOWED_HOSTS = [x.split(':') for x in env.list('DJANGO_ALLOWED_HOSTS')]\n\n\n# https://docs.djangoproject.com/en/dev/ref/settings/#admins\n# Who to sent emails when errors arise\nADMINS = [x.split(':') for x in env.list('DJANGO_ADMINS')]\n\n# DATABASES\n# ------------------------------------------------------------------------------\nDATABASES[\"default\"] = env.db(\"DATABASE_URL\") # noqa F405\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True # noqa F405\nDATABASES[\"default\"][\"CONN_MAX_AGE\"] = env.int(\"CONN_MAX_AGE\", default=60) # noqa F405\n\n# CACHES\n# ------------------------------------------------------------------------------\n# avec cpanel à voir ce que l'on peut configurer avec xtremcache/varnish et django-varnish\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n \"LOCATION\": \"\",\n },\n # \"default\": {\n # \"BACKEND\": \"django_redis.cache.RedisCache\",\n # \"LOCATION\": env(\"XTREM_CACHE_URL\"),\n # \"OPTIONS\": {\n # \"CLIENT_CLASS\": \"django_redis.client.DefaultClient\",\n # # Mimicing memcache behavior.\n # # http://jazzband.github.io/django-redis/latest/#_memcached_exceptions_behavior\n # \"IGNORE_EXCEPTIONS\": True,\n # },\n # }\n}\n\n# SECURITY\n# ------------------------------------------------------------------------------\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True)\n# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure\nSESSION_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure\nCSRF_COOKIE_SECURE = True\n# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds\n# TODO: set this to 60 seconds first and then to 518400 once you prove the former works\nSECURE_HSTS_SECONDS = 60\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\n \"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True\n)\n# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload\nSECURE_HSTS_PRELOAD = env.bool(\"DJANGO_SECURE_HSTS_PRELOAD\", default=True)\n# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\n \"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True\n)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_blogs_common_data(request, blogs_all_list):
page_num = request.GET.get('page', 1)
paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)
page_of_blogs = paginator.get_page(page_num)
current_page_num = page_of_blogs.number
page_range = list(range(max(1, current_page_num - 2), min(paginator.
num_pages + 1, current_page_num + 3)))
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[1] - page_range[0] >= 2:
page_range.insert(1, '...')
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
if page_range[-1] - page_range[-2] >= 2:
page_range.insert(-1, '...')
blog_dates = dict()
all_dates = Blog.objects.dates('created_time', 'month', order='DESC')
for blogs_date in all_dates:
blogs_count = Blog.objects.filter(created_time__year=blogs_date.
year, created_time__month=blogs_date.month).count()
blog_dates[blogs_date] = blogs_count
context = dict()
context['blogs'] = page_of_blogs
context['page_range'] = page_range
context['blog_types'] = BlogType.objects.all()
context['blog_dates'] = blog_dates
return context
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blogs_common_data(request, blogs_all_list)
return render(request, 'blog/blog_list.html', context)
<|reserved_special_token_0|>
def blog_detail(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
read_cookie_key = read_statistics_once_read(request, blog)
context = dict()
context['blog'] = blog
context['blog_author'] = blog.author.get_nickname_or_username()
context['login_form'] = LoginForm()
context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.
created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.
created_time).first()
context['blog_dates'] = Blog.objects.dates('created_time', 'month',
order='DESC')
response = render(request, 'blog/blog_detail.html', context)
response.set_cookie(read_cookie_key, 'true')
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_blogs_common_data(request, blogs_all_list):
page_num = request.GET.get('page', 1)
paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)
page_of_blogs = paginator.get_page(page_num)
current_page_num = page_of_blogs.number
page_range = list(range(max(1, current_page_num - 2), min(paginator.
num_pages + 1, current_page_num + 3)))
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[1] - page_range[0] >= 2:
page_range.insert(1, '...')
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
if page_range[-1] - page_range[-2] >= 2:
page_range.insert(-1, '...')
blog_dates = dict()
all_dates = Blog.objects.dates('created_time', 'month', order='DESC')
for blogs_date in all_dates:
blogs_count = Blog.objects.filter(created_time__year=blogs_date.
year, created_time__month=blogs_date.month).count()
blog_dates[blogs_date] = blogs_count
context = dict()
context['blogs'] = page_of_blogs
context['page_range'] = page_range
context['blog_types'] = BlogType.objects.all()
context['blog_dates'] = blog_dates
return context
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blogs_common_data(request, blogs_all_list)
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_type_pk):
blog_type = get_object_or_404(BlogType, pk=blog_type_pk)
blogs_all_list = Blog.objects.filter(blog_type=blog_type)
context = get_blogs_common_data(request, blogs_all_list)
context['blog_type'] = blog_type
return render(request, 'blog/blogs_with_type.html', context)
<|reserved_special_token_0|>
def blog_detail(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
read_cookie_key = read_statistics_once_read(request, blog)
context = dict()
context['blog'] = blog
context['blog_author'] = blog.author.get_nickname_or_username()
context['login_form'] = LoginForm()
context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.
created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.
created_time).first()
context['blog_dates'] = Blog.objects.dates('created_time', 'month',
order='DESC')
response = render(request, 'blog/blog_detail.html', context)
response.set_cookie(read_cookie_key, 'true')
return response
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_blogs_common_data(request, blogs_all_list):
page_num = request.GET.get('page', 1)
paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)
page_of_blogs = paginator.get_page(page_num)
current_page_num = page_of_blogs.number
page_range = list(range(max(1, current_page_num - 2), min(paginator.
num_pages + 1, current_page_num + 3)))
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[1] - page_range[0] >= 2:
page_range.insert(1, '...')
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
if page_range[-1] - page_range[-2] >= 2:
page_range.insert(-1, '...')
blog_dates = dict()
all_dates = Blog.objects.dates('created_time', 'month', order='DESC')
for blogs_date in all_dates:
blogs_count = Blog.objects.filter(created_time__year=blogs_date.
year, created_time__month=blogs_date.month).count()
blog_dates[blogs_date] = blogs_count
context = dict()
context['blogs'] = page_of_blogs
context['page_range'] = page_range
context['blog_types'] = BlogType.objects.all()
context['blog_dates'] = blog_dates
return context
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blogs_common_data(request, blogs_all_list)
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_type_pk):
blog_type = get_object_or_404(BlogType, pk=blog_type_pk)
blogs_all_list = Blog.objects.filter(blog_type=blog_type)
context = get_blogs_common_data(request, blogs_all_list)
context['blog_type'] = blog_type
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
blogs_all_list = Blog.objects.filter(created_time__year=year,
created_time__month=month)
context = get_blogs_common_data(request, blogs_all_list)
context['blogs_with_date'] = '%s年%s' % (year, month)
return render(request, 'blog/blogs_with_date.html', context)
def blog_detail(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
read_cookie_key = read_statistics_once_read(request, blog)
context = dict()
context['blog'] = blog
context['blog_author'] = blog.author.get_nickname_or_username()
context['login_form'] = LoginForm()
context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.
created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.
created_time).first()
context['blog_dates'] = Blog.objects.dates('created_time', 'month',
order='DESC')
response = render(request, 'blog/blog_detail.html', context)
response.set_cookie(read_cookie_key, 'true')
return response
<|reserved_special_token_1|>
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from .models import Blog, BlogType
from django.conf import settings
from read_statistics.utils import read_statistics_once_read
from user.forms import LoginForm
def get_blogs_common_data(request, blogs_all_list):
page_num = request.GET.get('page', 1)
paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)
page_of_blogs = paginator.get_page(page_num)
current_page_num = page_of_blogs.number
page_range = list(range(max(1, current_page_num - 2), min(paginator.
num_pages + 1, current_page_num + 3)))
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[1] - page_range[0] >= 2:
page_range.insert(1, '...')
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
if page_range[-1] - page_range[-2] >= 2:
page_range.insert(-1, '...')
blog_dates = dict()
all_dates = Blog.objects.dates('created_time', 'month', order='DESC')
for blogs_date in all_dates:
blogs_count = Blog.objects.filter(created_time__year=blogs_date.
year, created_time__month=blogs_date.month).count()
blog_dates[blogs_date] = blogs_count
context = dict()
context['blogs'] = page_of_blogs
context['page_range'] = page_range
context['blog_types'] = BlogType.objects.all()
context['blog_dates'] = blog_dates
return context
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blogs_common_data(request, blogs_all_list)
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_type_pk):
blog_type = get_object_or_404(BlogType, pk=blog_type_pk)
blogs_all_list = Blog.objects.filter(blog_type=blog_type)
context = get_blogs_common_data(request, blogs_all_list)
context['blog_type'] = blog_type
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
blogs_all_list = Blog.objects.filter(created_time__year=year,
created_time__month=month)
context = get_blogs_common_data(request, blogs_all_list)
context['blogs_with_date'] = '%s年%s' % (year, month)
return render(request, 'blog/blogs_with_date.html', context)
def blog_detail(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
read_cookie_key = read_statistics_once_read(request, blog)
context = dict()
context['blog'] = blog
context['blog_author'] = blog.author.get_nickname_or_username()
context['login_form'] = LoginForm()
context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.
created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.
created_time).first()
context['blog_dates'] = Blog.objects.dates('created_time', 'month',
order='DESC')
response = render(request, 'blog/blog_detail.html', context)
response.set_cookie(read_cookie_key, 'true')
return response
<|reserved_special_token_1|>
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from .models import Blog, BlogType
from django.conf import settings
from read_statistics.utils import read_statistics_once_read
from user.forms import LoginForm
# Create your views here.
# Pagination helper shared by the list views
def get_blogs_common_data(request, blogs_all_list):
    # Paginator
page_num = request.GET.get('page', 1)
paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)
page_of_blogs = paginator.get_page(page_num)
current_page_num = page_of_blogs.number
page_range = list(range(max(1, current_page_num - 2), min(paginator.num_pages + 1, current_page_num + 3)))
if page_range[0] != 1:
page_range.insert(0, 1)
if page_range[1] - page_range[0] >= 2:
page_range.insert(1, '...')
if page_range[-1] != paginator.num_pages:
page_range.append(paginator.num_pages)
if page_range[-1] - page_range[-2] >= 2:
page_range.insert(-1, '...')
    # Get per-month blog counts for the date archive
blog_dates = dict()
all_dates = Blog.objects.dates('created_time', 'month', order='DESC')
for blogs_date in all_dates:
blogs_count = Blog.objects.filter(created_time__year=blogs_date.year,
created_time__month=blogs_date.month).count()
blog_dates[blogs_date] = blogs_count
    # Collect the common context data
context = dict()
context['blogs'] = page_of_blogs
context['page_range'] = page_range
    # annotate() could be used to attach a blog count to each object instead
#context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))
context['blog_types'] = BlogType.objects.all()
context['blog_dates'] = blog_dates
return context
def blog_list(request):
blogs_all_list = Blog.objects.all()
context = get_blogs_common_data(request, blogs_all_list)
return render(request, 'blog/blog_list.html', context)
def blogs_with_type(request, blog_type_pk):
blog_type = get_object_or_404(BlogType, pk=blog_type_pk)
blogs_all_list = Blog.objects.filter(blog_type=blog_type)
context = get_blogs_common_data(request, blogs_all_list)
context['blog_type'] = blog_type
return render(request, 'blog/blogs_with_type.html', context)
def blogs_with_date(request, year, month):
blogs_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)
context = get_blogs_common_data(request, blogs_all_list)
context['blogs_with_date'] = '%s年%s' % (year, month)
return render(request, 'blog/blogs_with_date.html', context)
def blog_detail(request, blog_pk):
blog = get_object_or_404(Blog, pk=blog_pk)
read_cookie_key = read_statistics_once_read(request, blog)
context = dict()
context['blog'] = blog
context['blog_author'] = blog.author.get_nickname_or_username()
context['login_form'] = LoginForm()
context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()
context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()
context['blog_dates'] = Blog.objects.dates('created_time', 'month', order='DESC')
response = render(request, 'blog/blog_detail.html', context)
response.set_cookie(read_cookie_key, 'true')
return response
|
flexible
|
{
"blob_id": "9731f45b19d40a031216f8a430c09764fd34e984",
"index": 2594,
"step-1": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\n<mask token>\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-2": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\n<mask token>\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-3": "<mask token>\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year,\n created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-4": "from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Blog, BlogType\nfrom django.conf import settings\nfrom read_statistics.utils import read_statistics_once_read\nfrom user.forms import LoginForm\n\n\ndef get_blogs_common_data(request, blogs_all_list):\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.\n num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.\n year, created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year,\n created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.\n created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.\n created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month',\n order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response\n",
"step-5": "from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator\nfrom .models import Blog, BlogType\nfrom django.conf import settings\nfrom read_statistics.utils import read_statistics_once_read\nfrom user.forms import LoginForm\n\n# Create your views here.\n#分页函数\ndef get_blogs_common_data(request, blogs_all_list):\n # 分页器\n page_num = request.GET.get('page', 1)\n paginator = Paginator(blogs_all_list, settings.BLOGS_PER_PAGE)\n page_of_blogs = paginator.get_page(page_num)\n current_page_num = page_of_blogs.number\n page_range = list(range(max(1, current_page_num - 2), min(paginator.num_pages + 1, current_page_num + 3)))\n if page_range[0] != 1:\n page_range.insert(0, 1)\n if page_range[1] - page_range[0] >= 2:\n page_range.insert(1, '...')\n if page_range[-1] != paginator.num_pages:\n page_range.append(paginator.num_pages)\n if page_range[-1] - page_range[-2] >= 2:\n page_range.insert(-1, '...')\n # 获取日期归档的博客统计数量\n blog_dates = dict()\n all_dates = Blog.objects.dates('created_time', 'month', order='DESC')\n for blogs_date in all_dates:\n blogs_count = Blog.objects.filter(created_time__year=blogs_date.year,\n created_time__month=blogs_date.month).count()\n blog_dates[blogs_date] = blogs_count\n\n # 获取公共的数据\n context = dict()\n context['blogs'] = page_of_blogs\n context['page_range'] = page_range\n # 运用annotate方法给对象添加注释\n #context['blog_types'] = BlogType.objects.annotate(blog_count=Count('blog'))\n context['blog_types'] = BlogType.objects.all()\n context['blog_dates'] = blog_dates\n return context\n\ndef blog_list(request):\n blogs_all_list = Blog.objects.all()\n context = get_blogs_common_data(request, blogs_all_list)\n return render(request, 'blog/blog_list.html', context)\n\ndef blogs_with_type(request, blog_type_pk):\n blog_type = get_object_or_404(BlogType, pk=blog_type_pk)\n blogs_all_list = Blog.objects.filter(blog_type=blog_type)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blog_type'] = blog_type\n return render(request, 'blog/blogs_with_type.html', context)\n\ndef blogs_with_date(request, year, month):\n blogs_all_list = Blog.objects.filter(created_time__year=year, created_time__month=month)\n context = get_blogs_common_data(request, blogs_all_list)\n context['blogs_with_date'] = '%s年%s' % (year, month)\n return render(request, 'blog/blogs_with_date.html', context)\n\ndef blog_detail(request, blog_pk):\n blog = get_object_or_404(Blog, pk=blog_pk)\n read_cookie_key = read_statistics_once_read(request, blog)\n context = dict()\n context['blog'] = blog\n context['blog_author'] = blog.author.get_nickname_or_username()\n context['login_form'] = LoginForm()\n context['pre_blog'] = Blog.objects.filter(created_time__gt=blog.created_time).last()\n context['next_blog'] = Blog.objects.filter(created_time__lt=blog.created_time).first()\n context['blog_dates'] = Blog.objects.dates('created_time', 'month', order='DESC')\n response = render(request, 'blog/blog_detail.html', context)\n response.set_cookie(read_cookie_key, 'true')\n return response",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
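A minimal URLconf sketch for wiring the blog views above; the route patterns and names are assumptions for illustration only, not taken from the original project.

# Hypothetical urls.py for the blog app above; paths and names are illustrative.
from django.urls import path
from . import views

urlpatterns = [
    path('', views.blog_list, name='blog_list'),
    path('<int:blog_pk>/', views.blog_detail, name='blog_detail'),
    path('type/<int:blog_type_pk>/', views.blogs_with_type, name='blogs_with_type'),
    path('date/<int:year>/<int:month>/', views.blogs_with_date, name='blogs_with_date'),
]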
<|reserved_special_token_0|>
class UserProfile(UserenaBaseProfile):
user = models.OneToOneField(User)
facebook_id = models.CharField(max_length=128, blank=True, null=True)
class Meta:
permissions = ('change_profile', 'Change profile'), ('view_profile',
'View profile'), ('delete_profile', 'Delete profile')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserProfile(UserenaBaseProfile):
user = models.OneToOneField(User)
facebook_id = models.CharField(max_length=128, blank=True, null=True)
class Meta:
permissions = ('change_profile', 'Change profile'), ('view_profile',
'View profile'), ('delete_profile', 'Delete profile')
def create_user_profile(sender, instance, created, **kwargs):
"""
Create user profie and set the permissions
"""
if created and instance.pk >= 0:
UserProfile.objects.create(user=instance)
try:
default_group = Group.objects.get(name='default_users')
instance.groups.add(default_group)
except:
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserProfile(UserenaBaseProfile):
user = models.OneToOneField(User)
facebook_id = models.CharField(max_length=128, blank=True, null=True)
class Meta:
permissions = ('change_profile', 'Change profile'), ('view_profile',
'View profile'), ('delete_profile', 'Delete profile')
def create_user_profile(sender, instance, created, **kwargs):
"""
Create user profie and set the permissions
"""
if created and instance.pk >= 0:
UserProfile.objects.create(user=instance)
try:
default_group = Group.objects.get(name='default_users')
instance.groups.add(default_group)
except:
pass
post_save.connect(create_user_profile, sender=User)
post_save.connect(create_api_key, sender=User)
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import User, Group
from userena.models import UserenaBaseProfile
from django.db.models.signals import post_save
from tastypie.models import create_api_key
class UserProfile(UserenaBaseProfile):
user = models.OneToOneField(User)
facebook_id = models.CharField(max_length=128, blank=True, null=True)
class Meta:
permissions = ('change_profile', 'Change profile'), ('view_profile',
'View profile'), ('delete_profile', 'Delete profile')
def create_user_profile(sender, instance, created, **kwargs):
"""
Create user profie and set the permissions
"""
if created and instance.pk >= 0:
UserProfile.objects.create(user=instance)
try:
default_group = Group.objects.get(name='default_users')
instance.groups.add(default_group)
except:
pass
post_save.connect(create_user_profile, sender=User)
post_save.connect(create_api_key, sender=User)
<|reserved_special_token_1|>
from django.db import models
from django.contrib.auth.models import User, Group
from userena.models import UserenaBaseProfile
from django.db.models.signals import post_save
from tastypie.models import create_api_key
class UserProfile(UserenaBaseProfile):
# user reference
user = models.OneToOneField(User)
facebook_id = models.CharField(max_length = 128, blank = True, null = True)
class Meta:
permissions = (
('change_profile', 'Change profile'),
('view_profile', 'View profile'),
('delete_profile', 'Delete profile'),
)
def create_user_profile(sender, instance, created, **kwargs):
"""
Create user profie and set the permissions
"""
if created and instance.pk >= 0:
UserProfile.objects.create(user=instance)
# get default group, but not for anonymous
try:
default_group = Group.objects.get(name = "default_users")
instance.groups.add(default_group)
except:
pass
post_save.connect(create_user_profile, sender=User)
# generate api key for the user when the user is created
post_save.connect(create_api_key, sender=User)
|
flexible
|
{
"blob_id": "6e6f153857879da625f57f0382f1997fcae4f6c8",
"index": 6041,
"step-1": "<mask token>\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n try:\n default_group = Group.objects.get(name='default_users')\n instance.groups.add(default_group)\n except:\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n try:\n default_group = Group.objects.get(name='default_users')\n instance.groups.add(default_group)\n except:\n pass\n\n\npost_save.connect(create_user_profile, sender=User)\npost_save.connect(create_api_key, sender=User)\n",
"step-4": "from django.db import models\nfrom django.contrib.auth.models import User, Group\nfrom userena.models import UserenaBaseProfile\nfrom django.db.models.signals import post_save\nfrom tastypie.models import create_api_key\n\n\nclass UserProfile(UserenaBaseProfile):\n user = models.OneToOneField(User)\n facebook_id = models.CharField(max_length=128, blank=True, null=True)\n\n\n class Meta:\n permissions = ('change_profile', 'Change profile'), ('view_profile',\n 'View profile'), ('delete_profile', 'Delete profile')\n\n\ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n try:\n default_group = Group.objects.get(name='default_users')\n instance.groups.add(default_group)\n except:\n pass\n\n\npost_save.connect(create_user_profile, sender=User)\npost_save.connect(create_api_key, sender=User)\n",
"step-5": "from django.db import models\nfrom django.contrib.auth.models import User, Group\nfrom userena.models import UserenaBaseProfile\nfrom django.db.models.signals import post_save\nfrom tastypie.models import create_api_key\n\nclass UserProfile(UserenaBaseProfile):\n # user reference\n user = models.OneToOneField(User)\n \n facebook_id = models.CharField(max_length = 128, blank = True, null = True)\n \n class Meta:\n permissions = (\n ('change_profile', 'Change profile'),\n ('view_profile', 'View profile'),\n ('delete_profile', 'Delete profile'),\n )\n \ndef create_user_profile(sender, instance, created, **kwargs):\n \"\"\"\n Create user profie and set the permissions\n \"\"\"\n if created and instance.pk >= 0:\n UserProfile.objects.create(user=instance)\n \n # get default group, but not for anonymous\n try:\n default_group = Group.objects.get(name = \"default_users\")\n instance.groups.add(default_group)\n except:\n pass\n \npost_save.connect(create_user_profile, sender=User)\n\n# generate api key for the user when the user is created\npost_save.connect(create_api_key, sender=User)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
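A minimal sketch, assuming a standard Django test setup, of what the post_save wiring above implies: saving a new User should create its UserProfile, add it to the default_users group when that group exists, and create a Tastypie ApiKey. The test class name and sample credentials are invented.

# Illustrative test; relies only on models referenced by the signal handlers above.
from django.contrib.auth.models import User, Group
from django.test import TestCase
from tastypie.models import ApiKey


class UserSignalSketch(TestCase):
    def test_profile_group_and_api_key_created(self):
        Group.objects.create(name='default_users')  # optional; the signal silently skips a missing group
        user = User.objects.create_user('alice', '[email protected]', 'secret')
        self.assertIsNotNone(user.userprofile)  # created by create_user_profile
        self.assertTrue(user.groups.filter(name='default_users').exists())
        self.assertTrue(ApiKey.objects.filter(user=user).exists())  # created by tastypie's create_api_key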
"""main.py"""
import tkinter as tk
from tkinter import ttk
from ttkthemes import ThemedStyle
import wikipedia as wk
from newsapi import NewsApiClient as nac
import datetime
import random
class MainWindow:
"""Application controller object."""
def __init__(self):
self.p = None
self.main_page = tk.Tk()
self.main_page.title("MetaWikipedia")
self.main_page.geometry("500x500")
self.style = ThemedStyle(self.main_page)
self.style.set_theme("scidblue")
self.left_pane = ttk.PanedWindow(self.main_page)
self.right_pane = ttk.PanedWindow(self.main_page)
# Left pane
self.search = ttk.Button(self.left_pane, text="Search", command=self.search_wikipedia)
self.search.place(relx=0,rely=0,relheight=0.1,relwidth=0.5)
self.randomize_but = ttk.Button(self.left_pane, text="Randomize", command=self.randomize)
self.randomize_but.place(relx=0.5,rely=0,relheight=0.1,relwidth=0.5)
self.search_box = tk.Text(self.left_pane)
self.search_box.place(relx=0,rely=0.1,relheight=0.1,relwidth=1)
self.summary = tk.Text(self.left_pane, wrap=tk.WORD)
self.summary.place(relx=0,rely=0.2,relheight=0.4,relwidth=1)
extra_list_choices = ["none", "categories", "pageid", "sections", "html"]
self.extra_list_choice = tk.StringVar()
self.extra_list_choice.set("none")
self.extra_list = ttk.OptionMenu(
self.left_pane,
self.extra_list_choice,
*extra_list_choices,
command=self.update_choice
)
self.extra_list.place(relx=0,rely=.6,relheight=.1,relwidth=1)
self.other_text = tk.Text(self.left_pane)
self.other_text.place(relx=0,rely=0.7,relheight=.3,relwidth=1)
# Right pane
self.api_key_label = ttk.Label(self.right_pane, text="API Key")
self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=.4)
        self.api_key_entry = ttk.Entry(self.right_pane)  # ttk.Entry takes textvariable, not text; left empty here
self.api_key_entry.place(relx=.4, rely=0, relheight=0.1, relwidth=.6)
self.news_box = tk.Text(self.right_pane)
self.news_box.place(relx=0, rely=.1, relheight=.5, relwidth=1)
self.top_categories_label = ttk.Label(self.right_pane, text="Top Categories")
self.top_categories_label.place(relx=0,rely=0.6,relheight=0.1,relwidth=1)
self.top_categories = tk.Text(self.right_pane)
self.top_categories.place(relx=0,rely=0.7,relheight=0.3,relwidth=1)
self.category_map = {}
self.randomize()
self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)
self.right_pane.place(relx=.5, rely=0, relheight=1, relwidth=0.5)
self.main_page.mainloop()
def search_wikipedia(self):
"""Safely browse wikipedia articles."""
self.summary.delete('1.0', tk.END)
possibilities = wk.search(self.search_box.get('1.0',tk.END).replace("\n",""))
if len(possibilities) > 0:
try:
p = wk.page(possibilities[0])
except wk.DisambiguationError as e:
p = wk.page(e.options[0])
self.summary.configure(state="normal")
self.summary.delete('1.0', tk.END)
self.summary.insert('1.0', p.summary)
self.summary.configure(state="disabled")
self.p = p
self.update_category_map(p.categories)
self.get_news()
return None
def update_choice(self, value):
"""Update box based on menu choice."""
if self.p is not None:
if value == "none":
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', "")
if value == "categories":
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.categories)
if value == "pageid":
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.pageid)
if value == "sections":
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.sections)
if value == "html":
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.html())
def randomize(self):
"""Randomize wikipedia article."""
self.search_box.delete('1.0', tk.END)
self.search_box.insert('1.0', wk.random())
self.search_wikipedia()
def update_category_map(self, category_list):
"""Update the category map after a search."""
for category in category_list:
skip = False
for i in ["wiki", "sources", "article", "stub",
"wayback", "cs1"]:
if i in category.lower():
skip = True
if skip:
continue
if category in self.category_map:
self.category_map[category] += 1
else:
self.category_map[category] = 1
self.update_top_categories()
def update_top_categories(self):
"""Update the top categories text box."""
cats = self.sorted_categories()
display = ""
for cat in cats:
hit = "hits" if self.category_map[cat] > 1 else "hit"
display += f"{cat}, {self.category_map[cat]} {hit}\n"
self.top_categories.configure(state="normal")
self.top_categories.delete('1.0', tk.END)
self.top_categories.insert('1.0', display)
self.top_categories.configure(state="disabled")
def sorted_categories(self):
"""Sort categories by hits."""
count = lambda category: self.category_map[category]
l = sorted(self.category_map, key=count, reverse=True)
if len(l) > 5:
return l[:5]
else:
return l
def get_news(self):
"""Get news using News API."""
if self.api_key_entry.get() == "":
return None
api = nac(api_key=self.api_key_entry.get())
now = datetime.datetime.utcnow()
two_weeks = (now-datetime.timedelta(days=14))
#today = now.strftime()
query = ""
for cat in self.sorted_categories():
query += f"{cat},"
search = api.get_top_headlines(q=query,
sources="bbc-news,the-verge",
language="en")
news = ""
for article in search["articles"]:
news += f"{search['articles'][article]['title']}\n"
self.news_box.delete('1.0', tk.END)
self.news_box.insert('1.0', news)
if __name__ == "__main__":
main_window = MainWindow()
|
normal
|
{
"blob_id": "874fa927a1c0f1beeb31ca7b0de7fd2b16218ea4",
"index": 2756,
"step-1": "<mask token>\n\n\nclass MainWindow:\n <mask token>\n <mask token>\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainWindow:\n <mask token>\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def 
randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', 
self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\nif __name__ == '__main__':\n main_window = MainWindow()\n",
"step-4": "<mask token>\nimport tkinter as tk\nfrom tkinter import ttk\nfrom ttkthemes import ThemedStyle\nimport wikipedia as wk\nfrom newsapi import NewsApiClient as nac\nimport datetime\nimport random\n\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n 
self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\nif __name__ == '__main__':\n main_window = MainWindow()\n",
"step-5": "\"\"\"main.py\"\"\"\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom ttkthemes import ThemedStyle\nimport wikipedia as wk\nfrom newsapi import NewsApiClient as nac\nimport datetime\nimport random\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n \n def __init__(self):\n self.p = None\n \n self.main_page = tk.Tk()\n self.main_page.title(\"MetaWikipedia\")\n self.main_page.geometry(\"500x500\")\n\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme(\"scidblue\")\n\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n\n # Left pane\n self.search = ttk.Button(self.left_pane, text=\"Search\", command=self.search_wikipedia)\n self.search.place(relx=0,rely=0,relheight=0.1,relwidth=0.5)\n\n self.randomize_but = ttk.Button(self.left_pane, text=\"Randomize\", command=self.randomize)\n self.randomize_but.place(relx=0.5,rely=0,relheight=0.1,relwidth=0.5)\n\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0,rely=0.1,relheight=0.1,relwidth=1)\n\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0,rely=0.2,relheight=0.4,relwidth=1)\n\n extra_list_choices = [\"none\", \"categories\", \"pageid\", \"sections\", \"html\"]\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set(\"none\")\n self.extra_list = ttk.OptionMenu(\n self.left_pane,\n self.extra_list_choice,\n *extra_list_choices,\n command=self.update_choice\n )\n self.extra_list.place(relx=0,rely=.6,relheight=.1,relwidth=1)\n\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0,rely=0.7,relheight=.3,relwidth=1)\n\n\n # Right pane\n self.api_key_label = ttk.Label(self.right_pane, text=\"API Key\")\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=.4)\n\n self.api_key_entry = ttk.Entry(self.right_pane, text=\"ABC...\")\n self.api_key_entry.place(relx=.4, rely=0, relheight=0.1, relwidth=.6)\n\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=.1, relheight=.5, relwidth=1)\n\n self.top_categories_label = ttk.Label(self.right_pane, text=\"Top Categories\")\n self.top_categories_label.place(relx=0,rely=0.6,relheight=0.1,relwidth=1)\n\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0,rely=0.7,relheight=0.3,relwidth=1)\n\n self.category_map = {}\n\n self.randomize()\n \n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0',tk.END).replace(\"\\n\",\"\"))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state=\"normal\")\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state=\"disabled\")\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == \"none\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', \"\")\n if value == \"categories\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == \"pageid\":\n self.other_text.delete('1.0', tk.END)\n 
self.other_text.insert('1.0', self.p.pageid)\n if value == \"sections\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == \"html\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in [\"wiki\", \"sources\", \"article\", \"stub\",\n \"wayback\", \"cs1\"]:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = \"\"\n for cat in cats:\n hit = \"hits\" if self.category_map[cat] > 1 else \"hit\"\n display += f\"{cat}, {self.category_map[cat]} {hit}\\n\"\n self.top_categories.configure(state=\"normal\")\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state=\"disabled\")\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == \"\":\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = (now-datetime.timedelta(days=14))\n #today = now.strftime()\n query = \"\"\n for cat in self.sorted_categories():\n query += f\"{cat},\"\n search = api.get_top_headlines(q=query,\n sources=\"bbc-news,the-verge\",\n language=\"en\")\n news = \"\"\n for article in search[\"articles\"]:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n\nif __name__ == \"__main__\":\n main_window = MainWindow()\n",
"step-ids": [
8,
9,
11,
12,
13
]
}
|
[
8,
9,
11,
12,
13
] |
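The category ranking inside MainWindow does not depend on tkinter; below is a standalone sketch of the same idea (skip Wikipedia housekeeping categories, count hits, keep the top five). The sample page data is invented.

# Standalone sketch of MainWindow's category ranking; the page data is made up.
from collections import Counter

SKIP_WORDS = ("wiki", "sources", "article", "stub", "wayback", "cs1")

def top_categories(category_lists, limit=5):
    counts = Counter()
    for categories in category_lists:
        for category in categories:
            if any(word in category.lower() for word in SKIP_WORDS):
                continue  # drop housekeeping categories, as update_category_map does
            counts[category] += 1
    return counts.most_common(limit)

if __name__ == '__main__':
    pages = [
        ['Physics', 'Concepts in physics', 'Articles with short description'],
        ['Physics', 'Energy', 'Wikipedia articles with GND identifiers'],
    ]
    print(top_categories(pages))  # [('Physics', 2), ('Concepts in physics', 1), ('Energy', 1)]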
import code2
print ("Main en code1: %s\n" % __name__)
|
normal
|
{
"blob_id": "ecbc1da3efb39300b60aeb47897fb01b6bd7af31",
"index": 6028,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Main en code1: %s\\n' % __name__)\n",
"step-3": "import code2\nprint('Main en code1: %s\\n' % __name__)\n",
"step-4": "\nimport code2\nprint (\"Main en code1: %s\\n\" % __name__)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def handler(event: Dict, context: Dict):
"""AWS Lambda handler."""
granule = event.get('granule')
prefix = granule[0:-6]
print(prefix)
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
print(response)
granules = []
contents = response['Contents']
for obj in contents:
granules.append(obj['Key'][0:-4])
granule_str = ','.join(granules)
output = {'granule': granule_str}
return output
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(bucket)
if bucket is None:
raise Exception('No Input Bucket set')
def handler(event: Dict, context: Dict):
"""AWS Lambda handler."""
granule = event.get('granule')
prefix = granule[0:-6]
print(prefix)
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
print(response)
granules = []
contents = response['Contents']
for obj in contents:
granules.append(obj['Key'][0:-4])
granule_str = ','.join(granules)
output = {'granule': granule_str}
return output
<|reserved_special_token_1|>
<|reserved_special_token_0|>
s3 = boto3.client('s3')
bucket = os.getenv('SENTINEL_INPUT_BUCKET', None)
print(bucket)
if bucket is None:
raise Exception('No Input Bucket set')
def handler(event: Dict, context: Dict):
"""AWS Lambda handler."""
granule = event.get('granule')
prefix = granule[0:-6]
print(prefix)
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
print(response)
granules = []
contents = response['Contents']
for obj in contents:
granules.append(obj['Key'][0:-4])
granule_str = ','.join(granules)
output = {'granule': granule_str}
return output
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from typing import Dict
import os
import re
import boto3
from botocore.errorfactory import ClientError
from datetime import date
s3 = boto3.client('s3')
bucket = os.getenv('SENTINEL_INPUT_BUCKET', None)
print(bucket)
if bucket is None:
raise Exception('No Input Bucket set')
def handler(event: Dict, context: Dict):
"""AWS Lambda handler."""
granule = event.get('granule')
prefix = granule[0:-6]
print(prefix)
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)
print(response)
granules = []
contents = response['Contents']
for obj in contents:
granules.append(obj['Key'][0:-4])
granule_str = ','.join(granules)
output = {'granule': granule_str}
return output
<|reserved_special_token_1|>
"""
HLS: Check if Twin Granule Exists
"""
from typing import Dict
import os
import re
import boto3
from botocore.errorfactory import ClientError
from datetime import date
s3 = boto3.client("s3")
bucket = os.getenv("SENTINEL_INPUT_BUCKET", None)
print(bucket)
if bucket is None:
raise Exception("No Input Bucket set")
def handler(event: Dict, context: Dict):
"""AWS Lambda handler."""
granule = event.get("granule")
prefix = granule[0:-6]
print(prefix)
response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix,)
print(response)
granules = []
contents = response["Contents"]
for obj in contents:
granules.append(obj["Key"][0:-4])
granule_str = ",".join(granules)
output = {
"granule": granule_str,
}
return output
|
flexible
|
{
"blob_id": "d2b05c5653ca6c6b7219f6c0393e81c9425b5977",
"index": 279,
"step-1": "<mask token>\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n",
"step-2": "<mask token>\nprint(bucket)\nif bucket is None:\n raise Exception('No Input Bucket set')\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n",
"step-3": "<mask token>\ns3 = boto3.client('s3')\nbucket = os.getenv('SENTINEL_INPUT_BUCKET', None)\nprint(bucket)\nif bucket is None:\n raise Exception('No Input Bucket set')\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n",
"step-4": "<mask token>\nfrom typing import Dict\nimport os\nimport re\nimport boto3\nfrom botocore.errorfactory import ClientError\nfrom datetime import date\ns3 = boto3.client('s3')\nbucket = os.getenv('SENTINEL_INPUT_BUCKET', None)\nprint(bucket)\nif bucket is None:\n raise Exception('No Input Bucket set')\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get('granule')\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix)\n print(response)\n granules = []\n contents = response['Contents']\n for obj in contents:\n granules.append(obj['Key'][0:-4])\n granule_str = ','.join(granules)\n output = {'granule': granule_str}\n return output\n",
"step-5": "\"\"\"\nHLS: Check if Twin Granule Exists\n\"\"\"\nfrom typing import Dict\nimport os\nimport re\nimport boto3\nfrom botocore.errorfactory import ClientError\nfrom datetime import date\n\ns3 = boto3.client(\"s3\")\nbucket = os.getenv(\"SENTINEL_INPUT_BUCKET\", None)\nprint(bucket)\nif bucket is None:\n raise Exception(\"No Input Bucket set\")\n\n\ndef handler(event: Dict, context: Dict):\n \"\"\"AWS Lambda handler.\"\"\"\n granule = event.get(\"granule\")\n prefix = granule[0:-6]\n print(prefix)\n response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix,)\n print(response)\n granules = []\n contents = response[\"Contents\"]\n for obj in contents:\n granules.append(obj[\"Key\"][0:-4])\n\n granule_str = \",\".join(granules)\n\n output = {\n \"granule\": granule_str,\n }\n return output\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
class Caine:
<|reserved_special_token_0|>
class Pisica:
def sunet(self):
print('miau')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Caine:
def sunet(self):
print('ham ham')
class Pisica:
def sunet(self):
print('miau')
def asculta_sunet(tipul_animalului):
tipul_animalului.sunet()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Caine:
def sunet(self):
print('ham ham')
class Pisica:
def sunet(self):
print('miau')
def asculta_sunet(tipul_animalului):
tipul_animalului.sunet()
<|reserved_special_token_0|>
asculta_sunet(CaineObj)
asculta_sunet(PisicaObj)
<|reserved_special_token_1|>
class Caine:
def sunet(self):
print('ham ham')
class Pisica:
def sunet(self):
print('miau')
def asculta_sunet(tipul_animalului):
tipul_animalului.sunet()
CaineObj = Caine()
PisicaObj = Pisica()
asculta_sunet(CaineObj)
asculta_sunet(PisicaObj)
<|reserved_special_token_1|>
#recapitulare polimorfism
class Caine:
def sunet(self):
print("ham ham")
class Pisica:
def sunet(self):
print("miau")
def asculta_sunet(tipul_animalului):# astapta obiect tipul animalului
tipul_animalului.sunet()#
CaineObj=Caine()#dau obiect
PisicaObj=Pisica()
asculta_sunet(CaineObj)
asculta_sunet(PisicaObj)
|
flexible
|
{
"blob_id": "594fdec916520014faff80dd06c7a5553320664d",
"index": 4746,
"step-1": "class Caine:\n <mask token>\n\n\nclass Pisica:\n\n def sunet(self):\n print('miau')\n\n\n<mask token>\n",
"step-2": "class Caine:\n\n def sunet(self):\n print('ham ham')\n\n\nclass Pisica:\n\n def sunet(self):\n print('miau')\n\n\ndef asculta_sunet(tipul_animalului):\n tipul_animalului.sunet()\n\n\n<mask token>\n",
"step-3": "class Caine:\n\n def sunet(self):\n print('ham ham')\n\n\nclass Pisica:\n\n def sunet(self):\n print('miau')\n\n\ndef asculta_sunet(tipul_animalului):\n tipul_animalului.sunet()\n\n\n<mask token>\nasculta_sunet(CaineObj)\nasculta_sunet(PisicaObj)\n",
"step-4": "class Caine:\n\n def sunet(self):\n print('ham ham')\n\n\nclass Pisica:\n\n def sunet(self):\n print('miau')\n\n\ndef asculta_sunet(tipul_animalului):\n tipul_animalului.sunet()\n\n\nCaineObj = Caine()\nPisicaObj = Pisica()\nasculta_sunet(CaineObj)\nasculta_sunet(PisicaObj)\n",
"step-5": "#recapitulare polimorfism\r\nclass Caine:\r\n def sunet(self):\r\n print(\"ham ham\")\r\nclass Pisica:\r\n def sunet(self):\r\n print(\"miau\")\r\ndef asculta_sunet(tipul_animalului):# astapta obiect tipul animalului\r\n tipul_animalului.sunet()#\r\nCaineObj=Caine()#dau obiect\r\nPisicaObj=Pisica()\r\n\r\nasculta_sunet(CaineObj)\r\nasculta_sunet(PisicaObj)\r\n\r\n\r\n\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def bwd(world):
if world.state >= states.Config:
return left(world)
world.shouldRedraw = True
world.state = states.Intro
def draw(world):
if not world.shouldRedraw:
return
r = world.worldsurf_rect
world.worldsurf.fill(world.bg_color)
perfdata = world.getperf()
for i in range(0, len(perfdata)):
separate_point = 15
if i < separate_point:
if i < 5:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft', color=(239, 145, 242))
else:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft')
else:
gui.simpleText(world, perfdata[i], r.centerx - 50, (i -
separate_point + 1) * 30, alignment='midleft')
world.shouldRedraw = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def handleInput(world, event):
if event == events.btnSelectOn or event == events.btnEscapeOn:
bwd(world)
if event % 10 == 0:
world.sounds['uiaction'].play(0)
def bwd(world):
if world.state >= states.Config:
return left(world)
world.shouldRedraw = True
world.state = states.Intro
def draw(world):
if not world.shouldRedraw:
return
r = world.worldsurf_rect
world.worldsurf.fill(world.bg_color)
perfdata = world.getperf()
for i in range(0, len(perfdata)):
separate_point = 15
if i < separate_point:
if i < 5:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft', color=(239, 145, 242))
else:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft')
else:
gui.simpleText(world, perfdata[i], r.centerx - 50, (i -
separate_point + 1) * 30, alignment='midleft')
world.shouldRedraw = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def handleInput(world, event):
if event == events.btnSelectOn or event == events.btnEscapeOn:
bwd(world)
if event % 10 == 0:
world.sounds['uiaction'].play(0)
def bwd(world):
if world.state >= states.Config:
return left(world)
world.shouldRedraw = True
world.state = states.Intro
def draw(world):
if not world.shouldRedraw:
return
r = world.worldsurf_rect
world.worldsurf.fill(world.bg_color)
perfdata = world.getperf()
for i in range(0, len(perfdata)):
separate_point = 15
if i < separate_point:
if i < 5:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft', color=(239, 145, 242))
else:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft')
else:
gui.simpleText(world, perfdata[i], r.centerx - 50, (i -
separate_point + 1) * 30, alignment='midleft')
world.shouldRedraw = False
def enter(world):
world.state = states.Perf
world.configCatX = 0
world.configOptX = -1
world.shouldRedraw = True
<|reserved_special_token_1|>
import pygame, states, events
from settings import all as settings
import gui
def handleInput(world, event):
if event == events.btnSelectOn or event == events.btnEscapeOn:
bwd(world)
if event % 10 == 0:
world.sounds['uiaction'].play(0)
def bwd(world):
if world.state >= states.Config:
return left(world)
world.shouldRedraw = True
world.state = states.Intro
def draw(world):
if not world.shouldRedraw:
return
r = world.worldsurf_rect
world.worldsurf.fill(world.bg_color)
perfdata = world.getperf()
for i in range(0, len(perfdata)):
separate_point = 15
if i < separate_point:
if i < 5:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft', color=(239, 145, 242))
else:
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *
30, alignment='midleft')
else:
gui.simpleText(world, perfdata[i], r.centerx - 50, (i -
separate_point + 1) * 30, alignment='midleft')
world.shouldRedraw = False
def enter(world):
world.state = states.Perf
world.configCatX = 0
world.configOptX = -1
world.shouldRedraw = True
<|reserved_special_token_1|>
import pygame, states, events
from settings import all as settings
import gui
def handleInput(world, event):
if event == events.btnSelectOn or event == events.btnEscapeOn:
bwd(world)
if event%10 == 0:
world.sounds['uiaction'].play(0)
# world.shouldRedraw = True
def bwd(world):
if world.state >= states.Config:
return left(world)
world.shouldRedraw = True
world.state = states.Intro
def draw( world ):
if not world.shouldRedraw:
return
r = world.worldsurf_rect
world.worldsurf.fill(world.bg_color)
perfdata = world.getperf()
for i in range(0,len(perfdata)):
separate_point = 15
if(i<separate_point):
if(i<5):
gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) * 30, alignment="midleft", color=(239,145,242))
else:
gui.simpleText(world, perfdata[i], r.centerx-350, (i+1)*30,alignment="midleft")
else:
gui.simpleText(world, perfdata[i], r.centerx - 50, (i-separate_point + 1) * 30, alignment="midleft")
world.shouldRedraw = False
def enter(world):
world.state = states.Perf
world.configCatX = 0
world.configOptX = -1
world.shouldRedraw = True
|
flexible
|
{
"blob_id": "8650e0f1e7f2ac42c3c78191f79810f5befc9f41",
"index": 3298,
"step-1": "<mask token>\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef handleInput(world, event):\n if event == events.btnSelectOn or event == events.btnEscapeOn:\n bwd(world)\n if event % 10 == 0:\n world.sounds['uiaction'].play(0)\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef handleInput(world, event):\n if event == events.btnSelectOn or event == events.btnEscapeOn:\n bwd(world)\n if event % 10 == 0:\n world.sounds['uiaction'].play(0)\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\ndef enter(world):\n world.state = states.Perf\n world.configCatX = 0\n world.configOptX = -1\n world.shouldRedraw = True\n",
"step-4": "import pygame, states, events\nfrom settings import all as settings\nimport gui\n\n\ndef handleInput(world, event):\n if event == events.btnSelectOn or event == events.btnEscapeOn:\n bwd(world)\n if event % 10 == 0:\n world.sounds['uiaction'].play(0)\n\n\ndef bwd(world):\n if world.state >= states.Config:\n return left(world)\n world.shouldRedraw = True\n world.state = states.Intro\n\n\ndef draw(world):\n if not world.shouldRedraw:\n return\n r = world.worldsurf_rect\n world.worldsurf.fill(world.bg_color)\n perfdata = world.getperf()\n for i in range(0, len(perfdata)):\n separate_point = 15\n if i < separate_point:\n if i < 5:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft', color=(239, 145, 242))\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) *\n 30, alignment='midleft')\n else:\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i -\n separate_point + 1) * 30, alignment='midleft')\n world.shouldRedraw = False\n\n\ndef enter(world):\n world.state = states.Perf\n world.configCatX = 0\n world.configOptX = -1\n world.shouldRedraw = True\n",
"step-5": "import pygame, states, events\r\nfrom settings import all as settings\r\n\r\nimport gui\r\n\r\ndef handleInput(world, event):\r\n if event == events.btnSelectOn or event == events.btnEscapeOn:\r\n bwd(world)\r\n\r\n if event%10 == 0:\r\n world.sounds['uiaction'].play(0)\r\n # world.shouldRedraw = True\r\n\r\n\r\ndef bwd(world):\r\n if world.state >= states.Config:\r\n return left(world)\r\n\r\n world.shouldRedraw = True\r\n world.state = states.Intro\r\n\r\n\r\ndef draw( world ):\r\n if not world.shouldRedraw:\r\n return\r\n\r\n r = world.worldsurf_rect\r\n world.worldsurf.fill(world.bg_color)\r\n\r\n perfdata = world.getperf()\r\n for i in range(0,len(perfdata)):\r\n separate_point = 15\r\n if(i<separate_point):\r\n if(i<5):\r\n gui.simpleText(world, perfdata[i], r.centerx - 350, (i + 1) * 30, alignment=\"midleft\", color=(239,145,242))\r\n else:\r\n gui.simpleText(world, perfdata[i], r.centerx-350, (i+1)*30,alignment=\"midleft\")\r\n else:\r\n gui.simpleText(world, perfdata[i], r.centerx - 50, (i-separate_point + 1) * 30, alignment=\"midleft\")\r\n world.shouldRedraw = False\r\n\r\ndef enter(world):\r\n world.state = states.Perf\r\n world.configCatX = 0\r\n world.configOptX = -1\r\n world.shouldRedraw = True\r\n\r\n\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# ------------------------------------------
#
# Project: VEXcode VR Maze Solver
# Author: Hyunwoo Choi
# Created: January 12 2021
# Description: Solves a VEXcode VR maze using the right hand rule
#
# ------------------------------------------
# Library imports
from vexcode import *
#main
def main():
#putting down the pen to show the path of the robot
pen.set_pen_color(BLUE)
pen.move(DOWN)
drivetrain.set_drive_velocity(50, PERCENT)
drivetrain.set_turn_velocity(50, PERCENT)
#start with 90 deg turned right since we are using a right hand rule to solve this maze
drivetrain.turn_for(RIGHT, 90, DEGREES)
#run
run()
#this method checks all three sides and returns a boolean for each side if it is blocked or not
def checkSides():
rightC, frontC, leftC = True, True, True
drivetrain.turn_for(RIGHT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
rightC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
frontC = False
drivetrain.turn_for(LEFT, 90, DEGREES)
if front_eye.near_object() and distance.get_distance(MM) < 3000:
leftC = False
drivetrain.turn_for(RIGHT, 90, DEGREES)
return rightC, frontC, leftC
#main run function
def run():
#program loop
while True:
#drive
drivetrain.drive_for(FORWARD, 250, MM)
#checks if the robot's surroundings are clear by using the method above
rightClear, frontClear, leftClear = checkSides()
#uses the 3 boolean values above to determine the which direction to turn
if frontClear and not rightClear:
print("")
elif rightClear:
drivetrain.turn_for(RIGHT, 90, DEGREES)
elif (not (rightClear and frontClear)) and leftClear:
drivetrain.turn_for(LEFT, 90, DEGREES)
elif not (rightClear and leftClear and frontClear):
drivetrain.turn_for(RIGHT, 180, DEGREES)
#if found an exit, stop
if(down_eye.detect(RED)):
break
wait(1,MSEC)
# VR threads — Do not delete
vr_thread(main())
|
normal
|
{
"blob_id": "e560f2f202e477822729d1361b8d7ef7831a00e6",
"index": 8339,
"step-1": "<mask token>\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\nvr_thread(main())\n",
"step-4": "from vexcode import *\n\n\ndef main():\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n run()\n\n\ndef checkSides():\n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n return rightC, frontC, leftC\n\n\ndef run():\n while True:\n drivetrain.drive_for(FORWARD, 250, MM)\n rightClear, frontClear, leftClear = checkSides()\n if frontClear and not rightClear:\n print('')\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif not (rightClear and frontClear) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n if down_eye.detect(RED):\n break\n wait(1, MSEC)\n\n\nvr_thread(main())\n",
"step-5": "# ------------------------------------------\n# \n# \tProject: VEXcode VR Maze Solver\n#\tAuthor: Hyunwoo Choi\n#\tCreated: January 12 2021\n#\tDescription: Solves a VEXcode VR maze using the right hand rule\n# \n# ------------------------------------------\n\n# Library imports\nfrom vexcode import *\n\n#main\ndef main():\n #putting down the pen to show the path of the robot\n pen.set_pen_color(BLUE)\n pen.move(DOWN)\n\n drivetrain.set_drive_velocity(50, PERCENT)\n drivetrain.set_turn_velocity(50, PERCENT)\n\n \n #start with 90 deg turned right since we are using a right hand rule to solve this maze\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n \n #run\n run()\n\n#this method checks all three sides and returns a boolean for each side if it is blocked or not\ndef checkSides():\n \n rightC, frontC, leftC = True, True, True\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n rightC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n frontC = False\n drivetrain.turn_for(LEFT, 90, DEGREES)\n if front_eye.near_object() and distance.get_distance(MM) < 3000:\n leftC = False\n \n drivetrain.turn_for(RIGHT, 90, DEGREES)\n\n return rightC, frontC, leftC\n\n#main run function\ndef run():\n #program loop\n while True:\n\n #drive\n drivetrain.drive_for(FORWARD, 250, MM)\n\n #checks if the robot's surroundings are clear by using the method above\n rightClear, frontClear, leftClear = checkSides()\n\n #uses the 3 boolean values above to determine the which direction to turn\n if frontClear and not rightClear:\n print(\"\")\n elif rightClear:\n drivetrain.turn_for(RIGHT, 90, DEGREES)\n elif (not (rightClear and frontClear)) and leftClear:\n drivetrain.turn_for(LEFT, 90, DEGREES)\n elif not (rightClear and leftClear and frontClear):\n drivetrain.turn_for(RIGHT, 180, DEGREES)\n\n #if found an exit, stop\n if(down_eye.detect(RED)):\n break\n\n wait(1,MSEC)\n\n \n \n# VR threads — Do not delete\nvr_thread(main())\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Various utilities for neural networks implemented by Paddle. This code is rewritten based on:
https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/nn.py
"""
import math
import paddle
import paddle.nn as nn
class SiLU(nn.Layer):
def forward(self, x):
return x * nn.functional.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1D(*args, **kwargs)
elif dims == 2:
return nn.Conv2D(*args, **kwargs)
elif dims == 3:
return nn.Conv3D(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1D(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2D(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3D(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def update_ema(target_params, source_params, rate=0.99):
"""
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
"""
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(axis=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=half, dtype=paddle.float32) / half)
args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]
embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)
if dim % 2:
embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:, :1])], axis=-1)
return embedding
def checkpoint(func, inputs, params, flag):
"""
This function is disabled. And now just forward.
"""
return func(*inputs)
|
normal
|
{
"blob_id": "364d70fab02291bafadebea68fee94c0210e2de9",
"index": 4365,
"step-1": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\n<mask token>\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\n<mask token>\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\n<mask token>\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\n<mask token>\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\ndef timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=\n half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)\n if dim % 2:\n embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:,\n :1])], axis=-1)\n return embedding\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f'unsupported dimensions: {dims}')\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\ndef timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=\n half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)\n if dim % 2:\n embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:,\n :1])], axis=-1)\n return embedding\n\n\ndef checkpoint(func, inputs, params, flag):\n \"\"\"\n This function is disabled. And now just forward.\n \"\"\"\n return func(*inputs)\n",
"step-5": "\"\"\"\nVarious utilities for neural networks implemented by Paddle. This code is rewritten based on:\nhttps://github.com/openai/guided-diffusion/blob/main/guided_diffusion/nn.py\n\"\"\"\nimport math\n\nimport paddle\nimport paddle.nn as nn\n\n\nclass SiLU(nn.Layer):\n\n def forward(self, x):\n return x * nn.functional.sigmoid(x)\n\n\nclass GroupNorm32(nn.GroupNorm):\n\n def forward(self, x):\n return super().forward(x)\n\n\ndef conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1D(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2D(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3D(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")\n\n\ndef linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)\n\n\ndef avg_pool_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D average pooling module.\n \"\"\"\n if dims == 1:\n return nn.AvgPool1D(*args, **kwargs)\n elif dims == 2:\n return nn.AvgPool2D(*args, **kwargs)\n elif dims == 3:\n return nn.AvgPool3D(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")\n\n\ndef update_ema(target_params, source_params, rate=0.99):\n \"\"\"\n Update target parameters to be closer to those of source parameters using\n an exponential moving average.\n\n :param target_params: the target parameter sequence.\n :param source_params: the source parameter sequence.\n :param rate: the EMA rate (closer to 1 means slower).\n \"\"\"\n for targ, src in zip(target_params, source_params):\n targ.detach().mul_(rate).add_(src, alpha=1 - rate)\n\n\ndef zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module\n\n\ndef scale_module(module, scale):\n \"\"\"\n Scale the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n\ndef mean_flat(tensor):\n \"\"\"\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(axis=list(range(1, len(tensor.shape))))\n\n\ndef normalization(channels):\n \"\"\"\n Make a standard normalization layer.\n\n :param channels: number of input channels.\n :return: an nn.Module for normalization.\n \"\"\"\n return GroupNorm32(32, channels)\n\n\ndef timestep_embedding(timesteps, dim, max_period=10000):\n \"\"\"\n Create sinusoidal timestep embeddings.\n\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n half = dim // 2\n freqs = paddle.exp(-math.log(max_period) * paddle.arange(start=0, end=half, dtype=paddle.float32) / half)\n args = paddle.cast(timesteps[:, None], 'float32') * freqs[None]\n embedding = paddle.concat([paddle.cos(args), paddle.sin(args)], axis=-1)\n if dim % 2:\n embedding = paddle.concat([embedding, paddle.zeros_like(embedding[:, :1])], axis=-1)\n return embedding\n\n\ndef checkpoint(func, inputs, params, flag):\n \"\"\"\n This function is disabled. And now just forward.\n \"\"\"\n return func(*inputs)\n",
"step-ids": [
9,
11,
12,
14,
16
]
}
|
[
9,
11,
12,
14,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ap.add_argument('-m', '--model', required=True, help=
'path to trained model model')
ap.add_argument('-l', '--categorybin', required=True, help=
'path to output category label binarizer')
ap.add_argument('-c', '--colorbin', required=True, help=
'path to output color label binarizer')
ap.add_argument('-i', '--image', required=True, help='path to input image')
<|reserved_special_token_0|>
print('[INFO] loading network...')
<|reserved_special_token_0|>
print('[INFO] classifying image...')
<|reserved_special_token_0|>
cv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
(0, 255, 0), 2)
cv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,
255, 0), 2)
print('[INFO] {}'.format(categoryText))
print('[INFO] {}'.format(colorText))
cv2.imshow('Output', output)
cv2.waitKey(0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ap = argparse.ArgumentParser()
ap.add_argument('-m', '--model', required=True, help=
'path to trained model model')
ap.add_argument('-l', '--categorybin', required=True, help=
'path to output category label binarizer')
ap.add_argument('-c', '--colorbin', required=True, help=
'path to output color label binarizer')
ap.add_argument('-i', '--image', required=True, help='path to input image')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
output = imutils.resize(image, width=400)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (96, 96))
image = image.astype('float') / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
print('[INFO] loading network...')
model = load_model(args['model'], custom_objects={'tf': tf})
categoryLB = pickle.loads(open(args['categorybin'], 'rb').read())
colorLB = pickle.loads(open(args['colorbin'], 'rb').read())
print('[INFO] classifying image...')
categoryProba, colorProba = model.predict(image)
categoryIdx = categoryProba[0].argmax()
colorIdx = colorProba[0].argmax()
categoryLabel = categoryLB.classes_[categoryIdx]
colorLabel = colorLB.classes_[colorIdx]
categoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba
[0][categoryIdx] * 100)
colorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx
] * 100)
cv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
(0, 255, 0), 2)
cv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,
255, 0), 2)
print('[INFO] {}'.format(categoryText))
print('[INFO] {}'.format(colorText))
cv2.imshow('Output', output)
cv2.waitKey(0)
<|reserved_special_token_1|>
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import tensorflow as tf
import numpy as np
import argparse
import imutils
import pickle
import cv2
ap = argparse.ArgumentParser()
ap.add_argument('-m', '--model', required=True, help=
'path to trained model model')
ap.add_argument('-l', '--categorybin', required=True, help=
'path to output category label binarizer')
ap.add_argument('-c', '--colorbin', required=True, help=
'path to output color label binarizer')
ap.add_argument('-i', '--image', required=True, help='path to input image')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
output = imutils.resize(image, width=400)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (96, 96))
image = image.astype('float') / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
print('[INFO] loading network...')
model = load_model(args['model'], custom_objects={'tf': tf})
categoryLB = pickle.loads(open(args['categorybin'], 'rb').read())
colorLB = pickle.loads(open(args['colorbin'], 'rb').read())
print('[INFO] classifying image...')
categoryProba, colorProba = model.predict(image)
categoryIdx = categoryProba[0].argmax()
colorIdx = colorProba[0].argmax()
categoryLabel = categoryLB.classes_[categoryIdx]
colorLabel = colorLB.classes_[colorIdx]
categoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba
[0][categoryIdx] * 100)
colorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx
] * 100)
cv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
(0, 255, 0), 2)
cv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,
255, 0), 2)
print('[INFO] {}'.format(categoryText))
print('[INFO] {}'.format(colorText))
cv2.imshow('Output', output)
cv2.waitKey(0)
<|reserved_special_token_1|>
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import tensorflow as tf
import numpy as np
import argparse
import imutils
import pickle
import cv2
# USAGE
# python classify.py --model output/fashion.model --categorybin output/category_lb.pickle
# --colorbin output/color_lb.pickle --image examples/black_dress.jpg
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, help="path to trained model model")
ap.add_argument("-l", "--categorybin", required=True, help="path to output category label binarizer")
ap.add_argument("-c", "--colorbin", required=True, help="path to output color label binarizer")
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())
# load the image
image = cv2.imread(args["image"])
output = imutils.resize(image, width=400)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# pre-process the image for classification
image = cv2.resize(image, (96, 96))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
# load the trained convolutional neural network from disk, followed
# by the category and color label binarizers, respectively
print("[INFO] loading network...")
model = load_model(args["model"], custom_objects={"tf": tf})
categoryLB = pickle.loads(open(args["categorybin"], "rb").read())
colorLB = pickle.loads(open(args["colorbin"], "rb").read())
# classify the input image using Keras' multi-output functionality
print("[INFO] classifying image...")
(categoryProba, colorProba) = model.predict(image)
# find indexes of both the category and color outputs with the
# largest probabilities, then determine the corresponding class
# labels
categoryIdx = categoryProba[0].argmax()
colorIdx = colorProba[0].argmax()
categoryLabel = categoryLB.classes_[categoryIdx]
colorLabel = colorLB.classes_[colorIdx]
# draw the category label and color label on the image
categoryText = "category: {} ({:.2f}%)".format(categoryLabel, categoryProba[0][categoryIdx] * 100)
colorText = "color: {} ({:.2f}%)".format(colorLabel, colorProba[0][colorIdx] * 100)
cv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# display the predictions to the terminal as well
print("[INFO] {}".format(categoryText))
print("[INFO] {}".format(colorText))
# show the output image
cv2.imshow("Output", output)
cv2.waitKey(0)
|
flexible
|
{
"blob_id": "8ff9961c1415c04899bbc15ba64811a1b3ade262",
"index": 3082,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\n<mask token>\nprint('[INFO] loading network...')\n<mask token>\nprint('[INFO] classifying image...')\n<mask token>\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\nargs = vars(ap.parse_args())\nimage = cv2.imread(args['image'])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = cv2.resize(image, (96, 96))\nimage = image.astype('float') / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\nprint('[INFO] loading network...')\nmodel = load_model(args['model'], custom_objects={'tf': tf})\ncategoryLB = pickle.loads(open(args['categorybin'], 'rb').read())\ncolorLB = pickle.loads(open(args['colorbin'], 'rb').read())\nprint('[INFO] classifying image...')\ncategoryProba, colorProba = model.predict(image)\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\ncategoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba\n [0][categoryIdx] * 100)\ncolorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx\n ] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-4": "from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\nargs = vars(ap.parse_args())\nimage = cv2.imread(args['image'])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = cv2.resize(image, (96, 96))\nimage = image.astype('float') / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\nprint('[INFO] loading network...')\nmodel = load_model(args['model'], custom_objects={'tf': tf})\ncategoryLB = pickle.loads(open(args['categorybin'], 'rb').read())\ncolorLB = pickle.loads(open(args['colorbin'], 'rb').read())\nprint('[INFO] classifying image...')\ncategoryProba, colorProba = model.predict(image)\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\ncategoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba\n [0][categoryIdx] * 100)\ncolorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx\n ] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-5": "from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\n\n# USAGE\n# python classify.py --model output/fashion.model --categorybin output/category_lb.pickle\n# --colorbin output/color_lb.pickle --image examples/black_dress.jpg\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True, help=\"path to trained model model\")\nap.add_argument(\"-l\", \"--categorybin\", required=True, help=\"path to output category label binarizer\")\nap.add_argument(\"-c\", \"--colorbin\", required=True, help=\"path to output color label binarizer\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load the image\nimage = cv2.imread(args[\"image\"])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# pre-process the image for classification\nimage = cv2.resize(image, (96, 96))\nimage = image.astype(\"float\") / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\n\n# load the trained convolutional neural network from disk, followed\n# by the category and color label binarizers, respectively\nprint(\"[INFO] loading network...\")\nmodel = load_model(args[\"model\"], custom_objects={\"tf\": tf})\ncategoryLB = pickle.loads(open(args[\"categorybin\"], \"rb\").read())\ncolorLB = pickle.loads(open(args[\"colorbin\"], \"rb\").read())\n\n# classify the input image using Keras' multi-output functionality\nprint(\"[INFO] classifying image...\")\n(categoryProba, colorProba) = model.predict(image)\n\n# find indexes of both the category and color outputs with the\n# largest probabilities, then determine the corresponding class\n# labels\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\n\n# draw the category label and color label on the image\ncategoryText = \"category: {} ({:.2f}%)\".format(categoryLabel, categoryProba[0][categoryIdx] * 100)\ncolorText = \"color: {} ({:.2f}%)\".format(colorLabel, colorProba[0][colorIdx] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n\n# display the predictions to the terminal as well\nprint(\"[INFO] {}\".format(categoryText))\nprint(\"[INFO] {}\".format(colorText))\n\n# show the output image\ncv2.imshow(\"Output\", output)\ncv2.waitKey(0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import time
import csv
import os
import pandas as pd
col_list1 = ["cardtype","username_opensea", "address", "username_game"]
df1 = pd.read_csv("profiles.csv", usecols=col_list1)
#
for j in range(0,len(df1) ): #usernames in opensea
print(j)
user=[]
proto=[]
purity=[]
card_name=[]
card_effect=[]
god=[]
rarity=[]
mana=[]
type=[]
set=[]
print(df1['address'][j])
url1 = "https://api.godsunchained.com/v0/card?user="+df1['address'][j]+"&perPage=150000"
print (url1)
response = requests.request("GET", url1)
data = response.json()
number_cards=data['total']
if number_cards!=0:
for i in range(0, number_cards):
user.append(data['records'][i]['user'])
proto.append(data['records'][i]['proto'])
url2 = "https://api.godsunchained.com/v0/proto/" + str(proto[i])
purity.append(data['records'][i]['purity'])
# response2 = requests.request("GET", url2)
# data2 = response2.json()
# if data2['name']!=None:
# card_name.append(data2['name'])
# card_effect.append(data2['effect'])
# god.append(data2['god'])
# rarity.append(data2['rarity'])
# mana.append(data2['god'])
# type.append(data2['type'])
# set.append(data2['set'])
# else:
# card_name.append(None)
# card_effect.append(None)
# god.append(None)
# rarity.append(None)
# mana.append(None)
# type.append(None)
# set.append(None)
dict={
'user': user,
'proto_number': proto,
# 'card_name':card_name,
'purity': purity,
# 'card_effect': card_effect,
# 'god':god,
# 'rarity':rarity,
# 'mana': mana,
# 'type': type,
# 'set': set
}
df = pd.DataFrame(dict)
path = 'C:\\Users\\...'
df.to_csv(os.path.join(path, str(user[0]) + ".csv"), index=False)
|
normal
|
{
"blob_id": "93909ab98f1141940e64e079e09834ae5ad3995f",
"index": 6537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor j in range(0, len(df1)):\n print(j)\n user = []\n proto = []\n purity = []\n card_name = []\n card_effect = []\n god = []\n rarity = []\n mana = []\n type = []\n set = []\n print(df1['address'][j])\n url1 = 'https://api.godsunchained.com/v0/card?user=' + df1['address'][j\n ] + '&perPage=150000'\n print(url1)\n response = requests.request('GET', url1)\n data = response.json()\n number_cards = data['total']\n if number_cards != 0:\n for i in range(0, number_cards):\n user.append(data['records'][i]['user'])\n proto.append(data['records'][i]['proto'])\n url2 = 'https://api.godsunchained.com/v0/proto/' + str(proto[i])\n purity.append(data['records'][i]['purity'])\n dict = {'user': user, 'proto_number': proto, 'purity': purity}\n df = pd.DataFrame(dict)\n path = 'C:\\\\Users\\\\...'\n df.to_csv(os.path.join(path, str(user[0]) + '.csv'), index=False)\n",
"step-3": "<mask token>\ncol_list1 = ['cardtype', 'username_opensea', 'address', 'username_game']\ndf1 = pd.read_csv('profiles.csv', usecols=col_list1)\nfor j in range(0, len(df1)):\n print(j)\n user = []\n proto = []\n purity = []\n card_name = []\n card_effect = []\n god = []\n rarity = []\n mana = []\n type = []\n set = []\n print(df1['address'][j])\n url1 = 'https://api.godsunchained.com/v0/card?user=' + df1['address'][j\n ] + '&perPage=150000'\n print(url1)\n response = requests.request('GET', url1)\n data = response.json()\n number_cards = data['total']\n if number_cards != 0:\n for i in range(0, number_cards):\n user.append(data['records'][i]['user'])\n proto.append(data['records'][i]['proto'])\n url2 = 'https://api.godsunchained.com/v0/proto/' + str(proto[i])\n purity.append(data['records'][i]['purity'])\n dict = {'user': user, 'proto_number': proto, 'purity': purity}\n df = pd.DataFrame(dict)\n path = 'C:\\\\Users\\\\...'\n df.to_csv(os.path.join(path, str(user[0]) + '.csv'), index=False)\n",
"step-4": "import requests\nimport time\nimport csv\nimport os\nimport pandas as pd\ncol_list1 = ['cardtype', 'username_opensea', 'address', 'username_game']\ndf1 = pd.read_csv('profiles.csv', usecols=col_list1)\nfor j in range(0, len(df1)):\n print(j)\n user = []\n proto = []\n purity = []\n card_name = []\n card_effect = []\n god = []\n rarity = []\n mana = []\n type = []\n set = []\n print(df1['address'][j])\n url1 = 'https://api.godsunchained.com/v0/card?user=' + df1['address'][j\n ] + '&perPage=150000'\n print(url1)\n response = requests.request('GET', url1)\n data = response.json()\n number_cards = data['total']\n if number_cards != 0:\n for i in range(0, number_cards):\n user.append(data['records'][i]['user'])\n proto.append(data['records'][i]['proto'])\n url2 = 'https://api.godsunchained.com/v0/proto/' + str(proto[i])\n purity.append(data['records'][i]['purity'])\n dict = {'user': user, 'proto_number': proto, 'purity': purity}\n df = pd.DataFrame(dict)\n path = 'C:\\\\Users\\\\...'\n df.to_csv(os.path.join(path, str(user[0]) + '.csv'), index=False)\n",
"step-5": "import requests\r\nimport time\r\nimport csv\r\nimport os\r\nimport pandas as pd\r\n\r\ncol_list1 = [\"cardtype\",\"username_opensea\", \"address\", \"username_game\"]\r\ndf1 = pd.read_csv(\"profiles.csv\", usecols=col_list1)\r\n\r\n\r\n\r\n#\r\nfor j in range(0,len(df1) ): #usernames in opensea\r\n print(j)\r\n user=[]\r\n proto=[]\r\n purity=[]\r\n card_name=[]\r\n card_effect=[]\r\n god=[]\r\n rarity=[]\r\n mana=[]\r\n type=[]\r\n set=[]\r\n\r\n print(df1['address'][j])\r\n\r\n url1 = \"https://api.godsunchained.com/v0/card?user=\"+df1['address'][j]+\"&perPage=150000\"\r\n print (url1)\r\n response = requests.request(\"GET\", url1)\r\n data = response.json()\r\n\r\n\r\n number_cards=data['total']\r\n if number_cards!=0:\r\n for i in range(0, number_cards):\r\n user.append(data['records'][i]['user'])\r\n proto.append(data['records'][i]['proto'])\r\n url2 = \"https://api.godsunchained.com/v0/proto/\" + str(proto[i])\r\n \r\n purity.append(data['records'][i]['purity'])\r\n \r\n # response2 = requests.request(\"GET\", url2)\r\n # data2 = response2.json()\r\n \r\n # if data2['name']!=None:\r\n # card_name.append(data2['name'])\r\n # card_effect.append(data2['effect'])\r\n # god.append(data2['god'])\r\n # rarity.append(data2['rarity'])\r\n # mana.append(data2['god'])\r\n # type.append(data2['type'])\r\n # set.append(data2['set'])\r\n # else:\r\n # card_name.append(None)\r\n # card_effect.append(None)\r\n # god.append(None)\r\n # rarity.append(None)\r\n # mana.append(None)\r\n # type.append(None)\r\n # set.append(None)\r\n \r\n \r\n dict={\r\n 'user': user,\r\n 'proto_number': proto,\r\n # 'card_name':card_name,\r\n 'purity': purity,\r\n # 'card_effect': card_effect,\r\n # 'god':god,\r\n # 'rarity':rarity,\r\n # 'mana': mana,\r\n # 'type': type,\r\n # 'set': set\r\n }\r\n \r\n df = pd.DataFrame(dict)\r\n \r\n path = 'C:\\\\Users\\\\...'\r\n df.to_csv(os.path.join(path, str(user[0]) + \".csv\"), index=False)\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class DbManager:
def __init__(self, fname=None, tname=None):
if fname:
self.FILE_NAME = fname
else:
self.FILE_NAME = 'resources/static/LOG_Temp.db'
if tname:
self.TABLE_NAME = tname
else:
self.TABLE_NAME = "'LOG_RETURNS'"
def query_data(self, conditions, entries):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
condition_order = ['logID', 'returnType', 'sender',
'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']
cond_list = []
cond_string_list = []
for cond in condition_order:
val = ''
try:
val = conditions[cond]
except KeyError:
val = ''
if '|' in val:
i = 0
dep = list()
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['other']:
cond = 'serials'
val = conditions['other']
if '|' in val:
i = 0
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['logTimeFrom']:
if conditions['logTimeTo']:
cond_string_list.append('logTime>= ? AND logTime<= ?')
cond_list.append(conditions['logTimeFrom'])
cond_list.append(conditions['logTimeTo'])
else:
cond_string_list.append('logTime>= ?')
cond_list.append(conditions['logTimeFrom'])
elif conditions['logTimeTo']:
cond_string_list.append('logTime <= ?')
cond_list.append(conditions['logTimeTo'])
cond_string = ' AND '.join(cond_string_list)
print(cond_string)
print(cond_list)
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'
, cond_list)
return results
except sqlite3.OperationalError as e:
print(e)
def create_db(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
try:
c.execute(
f"""CREATE TABLE {self.TABLE_NAME} (
`logID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`returnType` text,
`sender` text,
`reciever` text,
`logTime` integer,
`dutyOfficer` text,
`net` TEXT,
`serials` text
);"""
)
conn.commit()
except sqlite3.OperationalError:
print('The Db already exists')
if ret:
return self.read_return()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def new_return(self, lst):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute('INSERT INTO ' + self.TABLE_NAME +
' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)
except sqlite3.OperationalError as e:
print(e)
"""
if 'no such table' in str(e):
if "game" in str(self.FILE_NAME):
print("MEME")
self.create_game_table()
else:
self.create_db()
self.new_return(lst)
"""
<|reserved_special_token_0|>
def read_return(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'
)
else:
results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_db(self)
<|reserved_special_token_0|>
def find_index(self, log_id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'
x = c.execute(sql_str, [str(log_id)])
return x
<|reserved_special_token_0|>
def update_record(self, lst, logID):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
rowData = (
'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'
)
c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +
' WHERE logID=' + logID, lst)
class File:
@staticmethod
def db_connect(sets):
try:
fname = sets['DB_FILE_NAME']
except KeyError:
fname = None
try:
tname = sets['DB_TABLE_NAME']
except KeyError:
tname = None
conn = DbManager(fname=fname, tname=tname)
return conn
@staticmethod
def generate_css_min():
MinifyFilesPre.min_css_file('resources/static/styles/main.css')
@staticmethod
def pre_merge(merge=False):
if merge:
tmp_file = MinifyFilesPre()
tmp_file.js_merge()
tmp_file.save()
else:
MinifyFilesPre.get_js_files()
@staticmethod
def get_first(self):
return self.get_first_index()
@staticmethod
def save_dic(dic):
""" Saves the given dictionary of serials to a file """
json.dump(dic, open('resources/files/serials.csv', 'w'))
@staticmethod
def read_dic():
""" reads the dictionary of serials """
dic = OrdDic()
dic.update(json.load(open('resources/files/serials.csv', 'r')))
return dic
@staticmethod
def read_legacy():
""" Depreciated reads the dictionary and returns it in the legacy format """
serials = File.read_dic()
final_dic = OrdDic()
for name, dic in serials.items():
inner_dic = OrdDic()
for serial in dic:
inner_dic.update({serial: dic[serial]['desc']})
final_dic.update({name: inner_dic})
return final_dic
@staticmethod
def read_locations():
""" reads the file containing the locations """
r = open('resources/files/locations.txt', 'r', newline='\n')
locations = r.read().split('\n')
return locations
@staticmethod
def save_Locations(lst):
lst = '\n'.join(lst)
w = open('resources/files/locations.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def save_callsigns(lst):
lst = '\n'.join(lst)
w = open('resources/files/callsigns.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def read_callsigns():
""" reads the file containing the callsigns """
r = open('resources/files/callsigns.txt', 'r', newline='\n')
callsigns = r.read().split('\n')
return callsigns
@staticmethod
def read_settings():
""" reads the settings from file """
settings = OrdDic()
settings.update(json.load(open('resources/files/settings.txt', 'r')))
return settings
@staticmethod
def save_settings(dic):
""" saves the given settings (dictionary) to file """
json.dump(dic, open('resources/files/settings.txt', 'w'))
@staticmethod
def save_log(self, log, update=False):
""" Saves the log to file """
main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']
lst = []
for key in main_keys:
lst.append(log[key])
log.pop(key)
lst.append(json.dumps(log))
if update:
self.update_record(lst, log['logID'])
else:
self.new_return(lst)
@staticmethod
def load_log_query(Db, query):
x = list(Db.query_data(query, 100))
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': row[4]})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def load_log(Db, log_id=None):
""" loads the log file """
if log_id:
row = Db.find_index(log_id).fetchone()
local_log = list()
ret = None
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
except TypeError:
pass
return ret
else:
try:
x = list(Db.read_return(entries=100))
except TypeError:
x = ''
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def delete_log_byID(Db, id):
Db.delete_return_byID(id)
<|reserved_special_token_0|>
class SaveTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MinifyFilesPre:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DbManager:
def __init__(self, fname=None, tname=None):
if fname:
self.FILE_NAME = fname
else:
self.FILE_NAME = 'resources/static/LOG_Temp.db'
if tname:
self.TABLE_NAME = tname
else:
self.TABLE_NAME = "'LOG_RETURNS'"
def query_data(self, conditions, entries):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
condition_order = ['logID', 'returnType', 'sender',
'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']
cond_list = []
cond_string_list = []
for cond in condition_order:
val = ''
try:
val = conditions[cond]
except KeyError:
val = ''
if '|' in val:
i = 0
dep = list()
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['other']:
cond = 'serials'
val = conditions['other']
if '|' in val:
i = 0
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['logTimeFrom']:
if conditions['logTimeTo']:
cond_string_list.append('logTime>= ? AND logTime<= ?')
cond_list.append(conditions['logTimeFrom'])
cond_list.append(conditions['logTimeTo'])
else:
cond_string_list.append('logTime>= ?')
cond_list.append(conditions['logTimeFrom'])
elif conditions['logTimeTo']:
cond_string_list.append('logTime <= ?')
cond_list.append(conditions['logTimeTo'])
cond_string = ' AND '.join(cond_string_list)
print(cond_string)
print(cond_list)
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'
, cond_list)
return results
except sqlite3.OperationalError as e:
print(e)
def create_db(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
try:
c.execute(
f"""CREATE TABLE {self.TABLE_NAME} (
`logID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`returnType` text,
`sender` text,
`reciever` text,
`logTime` integer,
`dutyOfficer` text,
`net` TEXT,
`serials` text
);"""
)
conn.commit()
except sqlite3.OperationalError:
print('The Db already exists')
if ret:
return self.read_return()
def count_records(self):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
results = c.execute(f"SELECT COUNT('LogID') FROM {self.TABLE_NAME}"
)
return results
def create_game_table(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
try:
c.execute(
f"""CREATE TABLE `{self.TABLE_NAME}` (
`GameID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`Name` TEXT DEFAULT '?',
`Rank` TEXT DEFAULT '?',
`Pl` TEXT DEFAULT '?',
`Score` INTEGER DEFAULT 0,
`Time` INTEGER
);"""
)
conn.commit()
except sqlite3.OperationalError:
print('The Db already exists')
if ret:
return self.read_return()
def new_return(self, lst):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute('INSERT INTO ' + self.TABLE_NAME +
' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)
except sqlite3.OperationalError as e:
print(e)
"""
if 'no such table' in str(e):
if "game" in str(self.FILE_NAME):
print("MEME")
self.create_game_table()
else:
self.create_db()
self.new_return(lst)
"""
def delete_return_byID(self, id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute(f'DELETE FROM {self.TABLE_NAME} WHERE logID = {id}')
def read_return(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'
)
else:
results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_db(self)
def read_game_score(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}'
)
else:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_game_table(self)
def find_index(self, log_id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'
x = c.execute(sql_str, [str(log_id)])
return x
def get_first_index(self):
with sqlite3.connect(self.FILE_NAME) as conn:
i = ''
c = conn.cursor()
sqlStr = ('SELECT logID FROM ' + self.TABLE_NAME +
' WHERE logID = (SELECT MAX(logID) FROM ' + self.
TABLE_NAME + ')')
x = c.execute(sqlStr)
for i in x:
i = int(list(i)[0])
try:
return i
except UnboundLocalError:
return ''
def update_record(self, lst, logID):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
rowData = (
'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'
)
c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +
' WHERE logID=' + logID, lst)
class File:
@staticmethod
def db_connect(sets):
try:
fname = sets['DB_FILE_NAME']
except KeyError:
fname = None
try:
tname = sets['DB_TABLE_NAME']
except KeyError:
tname = None
conn = DbManager(fname=fname, tname=tname)
return conn
@staticmethod
def generate_css_min():
MinifyFilesPre.min_css_file('resources/static/styles/main.css')
@staticmethod
def pre_merge(merge=False):
if merge:
tmp_file = MinifyFilesPre()
tmp_file.js_merge()
tmp_file.save()
else:
MinifyFilesPre.get_js_files()
@staticmethod
def get_first(self):
return self.get_first_index()
@staticmethod
def save_dic(dic):
""" Saves the given dictionary of serials to a file """
json.dump(dic, open('resources/files/serials.csv', 'w'))
@staticmethod
def read_dic():
""" reads the dictionary of serials """
dic = OrdDic()
dic.update(json.load(open('resources/files/serials.csv', 'r')))
return dic
@staticmethod
def read_legacy():
""" Depreciated reads the dictionary and returns it in the legacy format """
serials = File.read_dic()
final_dic = OrdDic()
for name, dic in serials.items():
inner_dic = OrdDic()
for serial in dic:
inner_dic.update({serial: dic[serial]['desc']})
final_dic.update({name: inner_dic})
return final_dic
@staticmethod
def read_locations():
""" reads the file containing the locations """
r = open('resources/files/locations.txt', 'r', newline='\n')
locations = r.read().split('\n')
return locations
@staticmethod
def save_Locations(lst):
lst = '\n'.join(lst)
w = open('resources/files/locations.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def save_callsigns(lst):
lst = '\n'.join(lst)
w = open('resources/files/callsigns.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def read_callsigns():
""" reads the file containing the callsigns """
r = open('resources/files/callsigns.txt', 'r', newline='\n')
callsigns = r.read().split('\n')
return callsigns
@staticmethod
def read_settings():
""" reads the settings from file """
settings = OrdDic()
settings.update(json.load(open('resources/files/settings.txt', 'r')))
return settings
@staticmethod
def save_settings(dic):
""" saves the given settings (dictionary) to file """
json.dump(dic, open('resources/files/settings.txt', 'w'))
@staticmethod
def save_log(self, log, update=False):
""" Saves the log to file """
main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']
lst = []
for key in main_keys:
lst.append(log[key])
log.pop(key)
lst.append(json.dumps(log))
if update:
self.update_record(lst, log['logID'])
else:
self.new_return(lst)
@staticmethod
def load_log_query(Db, query):
x = list(Db.query_data(query, 100))
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': row[4]})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def load_log(Db, log_id=None):
""" loads the log file """
if log_id:
row = Db.find_index(log_id).fetchone()
local_log = list()
ret = None
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
except TypeError:
pass
return ret
else:
try:
x = list(Db.read_return(entries=100))
except TypeError:
x = ''
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def delete_log_byID(Db, id):
Db.delete_return_byID(id)
<|reserved_special_token_0|>
class SaveTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MinifyFilesPre:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@staticmethod
def min_js_file(file_name):
js = jsmin(open(file_name, newline='\n').read())
open(file_name, 'w', newline='\n').write(js)
@staticmethod
def min_css_file(file_name):
css = compress(open(file_name, newline='\n').read())
open(file_name[:-4] + '.min.css', 'w', newline='\n').write(css)
<|reserved_special_token_0|>
class DbManager:
def __init__(self, fname=None, tname=None):
if fname:
self.FILE_NAME = fname
else:
self.FILE_NAME = 'resources/static/LOG_Temp.db'
if tname:
self.TABLE_NAME = tname
else:
self.TABLE_NAME = "'LOG_RETURNS'"
def query_data(self, conditions, entries):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
condition_order = ['logID', 'returnType', 'sender',
'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']
cond_list = []
cond_string_list = []
for cond in condition_order:
val = ''
try:
val = conditions[cond]
except KeyError:
val = ''
if '|' in val:
i = 0
dep = list()
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['other']:
cond = 'serials'
val = conditions['other']
if '|' in val:
i = 0
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['logTimeFrom']:
if conditions['logTimeTo']:
cond_string_list.append('logTime>= ? AND logTime<= ?')
cond_list.append(conditions['logTimeFrom'])
cond_list.append(conditions['logTimeTo'])
else:
cond_string_list.append('logTime>= ?')
cond_list.append(conditions['logTimeFrom'])
elif conditions['logTimeTo']:
cond_string_list.append('logTime <= ?')
cond_list.append(conditions['logTimeTo'])
cond_string = ' AND '.join(cond_string_list)
print(cond_string)
print(cond_list)
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'
, cond_list)
return results
except sqlite3.OperationalError as e:
print(e)
def create_db(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
try:
c.execute(
f"""CREATE TABLE {self.TABLE_NAME} (
`logID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`returnType` text,
`sender` text,
`reciever` text,
`logTime` integer,
`dutyOfficer` text,
`net` TEXT,
`serials` text
);"""
)
conn.commit()
except sqlite3.OperationalError:
print('The Db already exists')
if ret:
return self.read_return()
def count_records(self):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
results = c.execute(f"SELECT COUNT('LogID') FROM {self.TABLE_NAME}"
)
return results
def create_game_table(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
try:
c.execute(
f"""CREATE TABLE `{self.TABLE_NAME}` (
`GameID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`Name` TEXT DEFAULT '?',
`Rank` TEXT DEFAULT '?',
`Pl` TEXT DEFAULT '?',
`Score` INTEGER DEFAULT 0,
`Time` INTEGER
);"""
)
conn.commit()
except sqlite3.OperationalError:
print('The Db already exists')
if ret:
return self.read_return()
def new_return(self, lst):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute('INSERT INTO ' + self.TABLE_NAME +
' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)
except sqlite3.OperationalError as e:
print(e)
"""
if 'no such table' in str(e):
if "game" in str(self.FILE_NAME):
print("MEME")
self.create_game_table()
else:
self.create_db()
self.new_return(lst)
"""
def delete_return_byID(self, id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute(f'DELETE FROM {self.TABLE_NAME} WHERE logID = {id}')
def read_return(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'
)
else:
results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_db(self)
def read_game_score(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}'
)
else:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_game_table(self)
def find_index(self, log_id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'
x = c.execute(sql_str, [str(log_id)])
return x
def get_first_index(self):
with sqlite3.connect(self.FILE_NAME) as conn:
i = ''
c = conn.cursor()
sqlStr = ('SELECT logID FROM ' + self.TABLE_NAME +
' WHERE logID = (SELECT MAX(logID) FROM ' + self.
TABLE_NAME + ')')
x = c.execute(sqlStr)
for i in x:
i = int(list(i)[0])
try:
return i
except UnboundLocalError:
return ''
def update_record(self, lst, logID):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
rowData = (
'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'
)
c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +
' WHERE logID=' + logID, lst)
class File:
@staticmethod
def db_connect(sets):
try:
fname = sets['DB_FILE_NAME']
except KeyError:
fname = None
try:
tname = sets['DB_TABLE_NAME']
except KeyError:
tname = None
conn = DbManager(fname=fname, tname=tname)
return conn
@staticmethod
def generate_css_min():
MinifyFilesPre.min_css_file('resources/static/styles/main.css')
@staticmethod
def pre_merge(merge=False):
if merge:
tmp_file = MinifyFilesPre()
tmp_file.js_merge()
tmp_file.save()
else:
MinifyFilesPre.get_js_files()
@staticmethod
def get_first(self):
return self.get_first_index()
@staticmethod
def save_dic(dic):
""" Saves the given dictionary of serials to a file """
json.dump(dic, open('resources/files/serials.csv', 'w'))
@staticmethod
def read_dic():
""" reads the dictionary of serials """
dic = OrdDic()
dic.update(json.load(open('resources/files/serials.csv', 'r')))
return dic
@staticmethod
def read_legacy():
""" Depreciated reads the dictionary and returns it in the legacy format """
serials = File.read_dic()
final_dic = OrdDic()
for name, dic in serials.items():
inner_dic = OrdDic()
for serial in dic:
inner_dic.update({serial: dic[serial]['desc']})
final_dic.update({name: inner_dic})
return final_dic
@staticmethod
def read_locations():
""" reads the file containing the locations """
r = open('resources/files/locations.txt', 'r', newline='\n')
locations = r.read().split('\n')
return locations
@staticmethod
def save_Locations(lst):
lst = '\n'.join(lst)
w = open('resources/files/locations.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def save_callsigns(lst):
lst = '\n'.join(lst)
w = open('resources/files/callsigns.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def read_callsigns():
""" reads the file containing the callsigns """
r = open('resources/files/callsigns.txt', 'r', newline='\n')
callsigns = r.read().split('\n')
return callsigns
@staticmethod
def read_settings():
""" reads the settings from file """
settings = OrdDic()
settings.update(json.load(open('resources/files/settings.txt', 'r')))
return settings
@staticmethod
def save_settings(dic):
""" saves the given settings (dictionary) to file """
json.dump(dic, open('resources/files/settings.txt', 'w'))
@staticmethod
def save_log(self, log, update=False):
""" Saves the log to file """
main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']
lst = []
for key in main_keys:
lst.append(log[key])
log.pop(key)
lst.append(json.dumps(log))
if update:
self.update_record(lst, log['logID'])
else:
self.new_return(lst)
@staticmethod
def load_log_query(Db, query):
x = list(Db.query_data(query, 100))
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': row[4]})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def load_log(Db, log_id=None):
""" loads the log file """
if log_id:
row = Db.find_index(log_id).fetchone()
local_log = list()
ret = None
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
except TypeError:
pass
return ret
else:
try:
x = list(Db.read_return(entries=100))
except TypeError:
x = ''
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def delete_log_byID(Db, id):
Db.delete_return_byID(id)
<|reserved_special_token_0|>
class SaveTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MinifyFilesPre:
def __init__(self, merge=False):
file_names = glob('resources/static/js_files/*.js')
file_names.remove('resources/static/js_files/full_version.js')
self.file_names = file_names
self.merge = merge
self.js = ''
def save(self):
"""combines several js files together, with optional minification"""
with open('resources/static/js_files/full_version.js', 'w', newline
='\n') as w:
w.write(self.js)
def js_merge(self):
"""saves minified version to a single one"""
if self.merge:
js = ''
for file_name in self.file_names:
try:
js += jsmin(open(file_name, newline='\n').read())
except FileNotFoundError:
print(f'The file {file_name} could not be found')
self.js = jsmin(js)
else:
for file_name in self.file_names:
js = jsmin(open(file_name, newline='\n').read())
open(file_name, 'w', newline='\n').write(js)
@staticmethod
def min_js_file(file_name):
js = jsmin(open(file_name, newline='\n').read())
open(file_name, 'w', newline='\n').write(js)
@staticmethod
def min_css_file(file_name):
css = compress(open(file_name, newline='\n').read())
open(file_name[:-4] + '.min.css', 'w', newline='\n').write(css)
@staticmethod
def get_js_files():
file_names = glob('resources/static/js_files/*.js')
file_names.remove('resources/static/js_files/full_version.js')
class DbManager:
def __init__(self, fname=None, tname=None):
if fname:
self.FILE_NAME = fname
else:
self.FILE_NAME = 'resources/static/LOG_Temp.db'
if tname:
self.TABLE_NAME = tname
else:
self.TABLE_NAME = "'LOG_RETURNS'"
def query_data(self, conditions, entries):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
condition_order = ['logID', 'returnType', 'sender',
'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']
cond_list = []
cond_string_list = []
for cond in condition_order:
val = ''
try:
val = conditions[cond]
except KeyError:
val = ''
if '|' in val:
i = 0
dep = list()
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['other']:
cond = 'serials'
val = conditions['other']
if '|' in val:
i = 0
for sub_val in val.split('|'):
i += 1
cond_list.append(f'%{sub_val}%')
cond_string_list.append('(' +
f'lower({cond}) LIKE ?' +
f' OR lower({cond}) LIKE ?' * (i - 1) + ')')
else:
for sub_val in val.split(', '):
cond_string_list.append(f'lower({cond}) LIKE ?')
sub_val = f'%{sub_val.lower()}%'
cond_list.append(sub_val)
if conditions['logTimeFrom']:
if conditions['logTimeTo']:
cond_string_list.append('logTime>= ? AND logTime<= ?')
cond_list.append(conditions['logTimeFrom'])
cond_list.append(conditions['logTimeTo'])
else:
cond_string_list.append('logTime>= ?')
cond_list.append(conditions['logTimeFrom'])
elif conditions['logTimeTo']:
cond_string_list.append('logTime <= ?')
cond_list.append(conditions['logTimeTo'])
cond_string = ' AND '.join(cond_string_list)
print(cond_string)
print(cond_list)
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'
, cond_list)
return results
except sqlite3.OperationalError as e:
print(e)
def create_db(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
try:
c.execute(
f"""CREATE TABLE {self.TABLE_NAME} (
`logID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`returnType` text,
`sender` text,
`reciever` text,
`logTime` integer,
`dutyOfficer` text,
`net` TEXT,
`serials` text
);"""
)
conn.commit()
except sqlite3.OperationalError:
print('The Db already exists')
if ret:
return self.read_return()
def count_records(self):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
results = c.execute(f"SELECT COUNT('LogID') FROM {self.TABLE_NAME}"
)
return results
def create_game_table(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
try:
c.execute(
f"""CREATE TABLE `{self.TABLE_NAME}` (
`GameID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`Name` TEXT DEFAULT '?',
`Rank` TEXT DEFAULT '?',
`Pl` TEXT DEFAULT '?',
`Score` INTEGER DEFAULT 0,
`Time` INTEGER
);"""
)
conn.commit()
except sqlite3.OperationalError:
print('The Db already exists')
if ret:
return self.read_return()
def new_return(self, lst):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute('INSERT INTO ' + self.TABLE_NAME +
' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)
except sqlite3.OperationalError as e:
print(e)
"""
if 'no such table' in str(e):
if "game" in str(self.FILE_NAME):
print("MEME")
self.create_game_table()
else:
self.create_db()
self.new_return(lst)
"""
def delete_return_byID(self, id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute(f'DELETE FROM {self.TABLE_NAME} WHERE logID = {id}')
def read_return(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'
)
else:
results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_db(self)
def read_game_score(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}'
)
else:
results = c.execute(
f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_game_table(self)
def find_index(self, log_id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'
x = c.execute(sql_str, [str(log_id)])
return x
def get_first_index(self):
with sqlite3.connect(self.FILE_NAME) as conn:
i = ''
c = conn.cursor()
sqlStr = ('SELECT logID FROM ' + self.TABLE_NAME +
' WHERE logID = (SELECT MAX(logID) FROM ' + self.
TABLE_NAME + ')')
x = c.execute(sqlStr)
for i in x:
i = int(list(i)[0])
try:
return i
except UnboundLocalError:
return ''
def update_record(self, lst, logID):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
rowData = (
'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'
)
c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +
' WHERE logID=' + logID, lst)
class File:
@staticmethod
def db_connect(sets):
try:
fname = sets['DB_FILE_NAME']
except KeyError:
fname = None
try:
tname = sets['DB_TABLE_NAME']
except KeyError:
tname = None
conn = DbManager(fname=fname, tname=tname)
return conn
@staticmethod
def generate_css_min():
MinifyFilesPre.min_css_file('resources/static/styles/main.css')
@staticmethod
def pre_merge(merge=False):
if merge:
tmp_file = MinifyFilesPre()
tmp_file.js_merge()
tmp_file.save()
else:
MinifyFilesPre.get_js_files()
@staticmethod
def get_first(self):
return self.get_first_index()
@staticmethod
def save_dic(dic):
""" Saves the given dictionary of serials to a file """
json.dump(dic, open('resources/files/serials.csv', 'w'))
@staticmethod
def read_dic():
""" reads the dictionary of serials """
dic = OrdDic()
dic.update(json.load(open('resources/files/serials.csv', 'r')))
return dic
@staticmethod
def read_legacy():
""" Depreciated reads the dictionary and returns it in the legacy format """
serials = File.read_dic()
final_dic = OrdDic()
for name, dic in serials.items():
inner_dic = OrdDic()
for serial in dic:
inner_dic.update({serial: dic[serial]['desc']})
final_dic.update({name: inner_dic})
return final_dic
@staticmethod
def read_locations():
""" reads the file containing the locations """
r = open('resources/files/locations.txt', 'r', newline='\n')
locations = r.read().split('\n')
return locations
@staticmethod
def save_Locations(lst):
lst = '\n'.join(lst)
w = open('resources/files/locations.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def save_callsigns(lst):
lst = '\n'.join(lst)
w = open('resources/files/callsigns.txt', 'w', newline='\n')
w.write(lst)
@staticmethod
def read_callsigns():
""" reads the file containing the callsigns """
r = open('resources/files/callsigns.txt', 'r', newline='\n')
callsigns = r.read().split('\n')
return callsigns
@staticmethod
def read_settings():
""" reads the settings from file """
settings = OrdDic()
settings.update(json.load(open('resources/files/settings.txt', 'r')))
return settings
@staticmethod
def save_settings(dic):
""" saves the given settings (dictionary) to file """
json.dump(dic, open('resources/files/settings.txt', 'w'))
@staticmethod
def save_log(self, log, update=False):
""" Saves the log to file """
main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']
lst = []
for key in main_keys:
lst.append(log[key])
log.pop(key)
lst.append(json.dumps(log))
if update:
self.update_record(lst, log['logID'])
else:
self.new_return(lst)
@staticmethod
def load_log_query(Db, query):
x = list(Db.query_data(query, 100))
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': row[4]})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def load_log(Db, log_id=None):
""" loads the log file """
if log_id:
row = Db.find_index(log_id).fetchone()
local_log = list()
ret = None
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
except TypeError:
pass
return ret
else:
try:
x = list(Db.read_return(entries=100))
except TypeError:
x = ''
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
local_log.append(ret)
except TypeError:
print('none value in db')
return local_log
@staticmethod
def delete_log_byID(Db, id):
Db.delete_return_byID(id)
def fix_time(dtg):
if len(str(dtg)) == 6:
return str(dtg)
else:
return str(f'0{dtg}')
class SaveTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from csv import reader, writer
from collections import OrderedDict as OrdDic
import sqlite3
from jsmin import jsmin
from glob import glob
from csscompressor import compress
from threading import Timer
from glob import glob
import os
import shutil
import logging
import json
class MinifyFilesPre:
def __init__(self, merge=False):
file_names = glob("resources/static/js_files/*.js")
file_names.remove("resources/static/js_files/full_version.js")
self.file_names = file_names
self.merge = merge
self.js = ""
def save(self):
"""combines several js files together, with optional minification"""
with open("resources/static/js_files/full_version.js", 'w', newline="\n") as w:
w.write(self.js)
def js_merge(self):
"""saves minified version to a single one"""
if self.merge:
js = ""
for file_name in self.file_names:
try:
js += jsmin(open(file_name, newline="\n").read())
except FileNotFoundError:
print(f"The file {file_name} could not be found")
self.js = jsmin(js)
else:
for file_name in self.file_names:
js = jsmin(open(file_name, newline="\n").read())
open(file_name, 'w', newline="\n").write(js)
@staticmethod
def min_js_file(file_name):
js = jsmin(open(file_name, newline="\n").read())
open(file_name, 'w', newline="\n").write(js)
@staticmethod
def min_css_file(file_name):
css = compress(open(file_name, newline="\n").read())
open(file_name[:-4] + '.min.css', 'w', newline="\n").write(css)
@staticmethod
def get_js_files():
file_names = glob("resources/static/js_files/*.js")
file_names.remove("resources/static/js_files/full_version.js")
class DbManager:
def __init__(self, fname=None, tname=None):
if fname:
self.FILE_NAME = fname
else:
self.FILE_NAME = 'resources/static/LOG_Temp.db'
if tname:
self.TABLE_NAME = tname
else:
self.TABLE_NAME = "'LOG_RETURNS'"
def query_data(self, conditions, entries):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
condition_order = ['logID',
'returnType',
'sender',
'reciever',
'logTime',
'dutyOfficer',
'net',
'serials']
cond_list = []
cond_string_list = []
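                # build a parameterised WHERE clause: each condition becomes one or more
                # "lower(col) LIKE ?" terms; values split on "|" are OR'd together,
                # values split on ", " are AND'd together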
for cond in condition_order:
val = ""
try:
val = conditions[cond]
except KeyError:
val = ""
if "|" in val:
i = 0
dep = list()
for sub_val in val.split("|"):
i+=1
cond_list.append(f"%{sub_val}%")
cond_string_list.append("("+f"lower({cond}) LIKE ?"+ f" OR lower({cond}) LIKE ?"*(i-1)+")")
else:
for sub_val in val.split(", "):
cond_string_list.append(f"lower({cond}) LIKE ?")
sub_val = f"%{sub_val.lower()}%"
cond_list.append(sub_val)
if conditions['other']:
cond = "serials"
val = conditions['other']
if "|" in val:
i = 0
for sub_val in val.split("|"):
i+=1
cond_list.append(f"%{sub_val}%")
cond_string_list.append("("+f"lower({cond}) LIKE ?"+ f" OR lower({cond}) LIKE ?"*(i-1)+")")
else:
for sub_val in val.split(", "):
cond_string_list.append(f"lower({cond}) LIKE ?")
sub_val = f"%{sub_val.lower()}%"
cond_list.append(sub_val)
if conditions['logTimeFrom']:
if conditions['logTimeTo']:
cond_string_list.append("logTime>= ? AND logTime<= ?")
cond_list.append(conditions['logTimeFrom'])
cond_list.append(conditions['logTimeTo'])
else:
cond_string_list.append("logTime>= ?")
cond_list.append(conditions['logTimeFrom'])
elif conditions['logTimeTo']:
cond_string_list.append("logTime <= ?")
cond_list.append(conditions['logTimeTo'])
cond_string = ' AND '.join(cond_string_list)
print(cond_string)
print(cond_list)
results = c.execute(f"SELECT * FROM {self.TABLE_NAME} WHERE "
f"{cond_string}"
f" ORDER BY logID DESC LIMIT {entries}", cond_list)
return results
except sqlite3.OperationalError as e:
print(e)
def create_db(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
# Create table
try:
c.execute(f'''CREATE TABLE {self.TABLE_NAME} (
`logID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`returnType` text,
`sender` text,
`reciever` text,
`logTime` integer,
`dutyOfficer` text,
`net` TEXT,
`serials` text
);''')
conn.commit()
except sqlite3.OperationalError:
print("The Db already exists")
if ret:
return self.read_return()
def count_records(self):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
results = c.execute(f"SELECT COUNT('LogID') FROM {self.TABLE_NAME}")
return results
def create_game_table(self, ret=False):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
# Create table
try:
c.execute(f'''CREATE TABLE `{self.TABLE_NAME}` (
`GameID` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
`Name` TEXT DEFAULT '?',
`Rank` TEXT DEFAULT '?',
`Pl` TEXT DEFAULT '?',
`Score` INTEGER DEFAULT 0,
`Time` INTEGER
);''')
conn.commit()
except sqlite3.OperationalError:
print("The Db already exists")
if ret:
return self.read_return()
def new_return(self, lst):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute(
'INSERT INTO ' + self.TABLE_NAME + ' VALUES (NULL,' +
'?, ' * (len(lst) - 1) + '?)',
lst)
except sqlite3.OperationalError as e:
print(e)
"""
if 'no such table' in str(e):
if "game" in str(self.FILE_NAME):
print("MEME")
self.create_game_table()
else:
self.create_db()
self.new_return(lst)
"""
def delete_return_byID(self, id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
c.execute(f"DELETE FROM {self.TABLE_NAME} WHERE logID = {id}")
def read_return(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(f"SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}")
else:
                    # fallback when no entry limit is given; should not normally be needed
results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_db(self)
def read_game_score(self, entries=None):
try:
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
if entries:
results = c.execute(f"SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}")
else:
                    # fallback when no entry limit is given; should not normally be needed
results = c.execute(f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')
return results
except sqlite3.OperationalError as e:
if 'no such table' in str(e):
DbManager.create_game_table(self)
def find_index(self, log_id):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
sql_str = ("""SELECT * FROM """ +
self.TABLE_NAME +
""" WHERE logID=?""")
x = c.execute(sql_str, [str(log_id)])
return x
def get_first_index(self):
with sqlite3.connect(self.FILE_NAME) as conn:
i=""
c = conn.cursor()
sqlStr = ("""SELECT logID FROM """ +
self.TABLE_NAME +
""" WHERE logID = (SELECT MAX(logID) FROM """ +
self.TABLE_NAME + ")")
x = c.execute(sqlStr)
for i in x:
i = int(list(i)[0])
try:
return i
except UnboundLocalError:
return ""
def update_record(self, lst, logID):
with sqlite3.connect(self.FILE_NAME) as conn:
c = conn.cursor()
rowData = """returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?"""
c.execute(
'UPDATE ' + self.TABLE_NAME + ' SET ' + rowData + ' WHERE logID=' + logID,
lst)
class File:
@staticmethod
def db_connect(sets):
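        """ builds a DbManager from the settings dict; missing keys fall back to the defaults """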
try:
fname = sets['DB_FILE_NAME']
except KeyError:
fname = None
try:
tname = sets['DB_TABLE_NAME']
except KeyError:
tname = None
conn = DbManager(fname=fname, tname=tname)
return conn
@staticmethod
def generate_css_min():
MinifyFilesPre.min_css_file('resources/static/styles/main.css')
@staticmethod
def pre_merge(merge=False):
if merge:
tmp_file = MinifyFilesPre()
tmp_file.js_merge()
tmp_file.save()
else:
MinifyFilesPre.get_js_files()
@staticmethod
def get_first(self):
return self.get_first_index()
@staticmethod
def save_dic(dic):
""" Saves the given dictionary of serials to a file """
json.dump(dic, open("resources/files/serials.csv", "w"))
# w = writer(open("resources/files/serials.csv", "w", newline="\n"))
# w.writerow(['Return Name', 'Serials'])
# for name, serials in dic.items():
# lst = []
# if name == "Return Name":
# lst.append(name)
# lst.append(serials)
# else:
# for serial in serials:
# if serial == "Return Name":
# lst.append(serials)
# else:
# inner_lst = []
# for cont in serials[serial]:
# if cont == "options":
# inner_lst.append(cont + ";;@@;;" +
# ";;##;;".join(
# serials
# [serial]
# ["options"]))
# else:
# inner_lst.append(
# cont + ";;@@;;" + serials[serial][cont])
# lst.append(serial + ';;:::;;' + ";;!!!;;".join(inner_lst))
# w.writerow([(name), (';;,,,;;'.join(lst))])
@staticmethod
def read_dic():
""" reads the dictionary of serials """
# should return the original format
dic = OrdDic()
dic.update(json.load(open("resources/files/serials.csv", "r")))
# OLD CODE
# logging.log(logging.INFO, "File path: "+os.path.realpath(__file__))
# r = reader(open("resources/files/serials.csv", "r", newline="\n"))
# i = 0
# for row in r:
# if i:
# inner_dic = OrdDic()
# for serial in row[1].split(';;,,,;;'):
# serial = serial.split(';;:::;;')
# sub_dic = OrdDic()
# for sub_serial in serial[1].split(';;!!!;;'):
# sub_serial = sub_serial.split(";;@@;;")
# if sub_serial[0] == 'options':
# options = sub_serial[1].split(";;##;;")
# sub_dic.update({sub_serial[0]: options})
# else:
# sub_dic.update(
# {sub_serial[0]: sub_serial[1]})
# inner_dic.update({serial[0]: sub_dic})
# # lst = row[1].split('\\')
# dic.update({row[0]: inner_dic})
# else:
# i += 1
# # print(" * Read Dictionary")
return dic
@staticmethod
def read_legacy():
""" Depreciated reads the dictionary and returns it in the legacy format """
serials = File.read_dic()
final_dic = OrdDic()
for name, dic in serials.items():
inner_dic = OrdDic()
for serial in dic:
inner_dic.update({serial: dic[serial]['desc']})
final_dic.update({name: inner_dic})
return final_dic
@staticmethod
def read_locations():
""" reads the file containing the locations """
r = open("resources/files/locations.txt", "r", newline="\n")
locations = r.read().split("\n")
return locations
@staticmethod
def save_Locations(lst):
lst = '\n'.join(lst)
w = open("resources/files/locations.txt", "w", newline="\n")
w.write(lst)
@staticmethod
def save_callsigns(lst):
lst = '\n'.join(lst)
w = open("resources/files/callsigns.txt", "w", newline="\n")
w.write(lst)
@staticmethod
def read_callsigns():
""" reads the file containing the callsigns """
r = open("resources/files/callsigns.txt", "r", newline="\n")
callsigns = r.read().split("\n")
return callsigns
@staticmethod
def read_settings():
""" reads the settings from file """
settings = OrdDic()
settings.update(json.load(open("resources/files/settings.txt", "r")))
## OLD WAY BELOW
#r = open("resources/files/settings.txt", "r", newline="\n")
# for option in r.read().split('\n'):
# try:
# #option = option.split('\\')
# #settings.update({option[0]: option[1]})
# # settings.update(json.loads(option))
# except IndexError:
# pass
return settings
@staticmethod
def save_settings(dic):
""" saves the given settings (dictionary) to file """
json.dump(dic, open("resources/files/settings.txt", "w"))
# LEGACY
# with open("resources/files/settings.txt", "w", newline="\n") as w:
# for sett, val in dic.items():
# w.write(sett + '\\' + val + '\n')
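    # Settings now round-trip as a plain JSON object; the backslash-delimited legacy
    # format shown in the commented code above is kept only for reference.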
@staticmethod
def save_log(self, log, update=False):
""" Saves the log to file """
main_keys = [
'name',
'sender',
'receiver',
'time',
'duty',
'net'
]
# print(test)
lst = []
for key in main_keys:
# print(key)
lst.append(log[key])
log.pop(key)
# LEGACY
# inn_lst = []
# for serial, val in log.items():
# if not (serial in main_keys):
# inn_lst.append(serial + '\\' + val)
# lst.append('||'.join(inn_lst))
lst.append(json.dumps(log))
# print(lst)
if update:
self.update_record(lst, log['logID'])
else:
self.new_return(lst)
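    # Example (hypothetical values; `self` is the DbManager connection, and any keys
    # beyond the six main ones end up JSON-encoded in the `serials` column):
    #   File.save_log(db, {'name': 'LOCSTAT', 'sender': '1', 'receiver': '0',
    #                      'time': '120545', 'duty': 'DO', 'net': 'CMD',
    #                      'A': 'C/S 11', 'B': 'GR 1234 5678'})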
@staticmethod
def load_log_query(Db, query):
x = list(Db.query_data(query, 100))
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': row[4]})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
# LEGACY
# for serial_data in row[7:]:
# try:
# for serial in serial_data.split('||'):
# ser, val = serial.split('\\')
# val = "" + val
# ret.update({ser: str(val)})
# except AttributeError:
# print('The Db structure is incorrect')
local_log.append(ret)
except TypeError:
print("none value in db")
return local_log
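    # Query sketch (keys mirror DbManager.query_data; all values hypothetical).
    # Values are substring matches, '|' works as OR, and empty strings match anything:
    #   File.load_log_query(db, {'logID': '', 'returnType': 'LOCSTAT|SITREP',
    #                            'sender': '1', 'reciever': '', 'logTime': '',
    #                            'dutyOfficer': '', 'net': '', 'serials': '', 'other': '',
    #                            'logTimeFrom': '', 'logTimeTo': ''})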
@staticmethod
def load_log(Db, log_id=None):
""" loads the log file """
# try:
# r = reader(open("resources/static/logs.csv", "r"))
# except FileNotFoundError:
# w = open("resources/static/logs.csv", 'w')
# w.close()
# r = reader(open("resources/static/logs.csv", "r"))
if log_id:
row = Db.find_index(log_id).fetchone()
local_log = list()
ret = None
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
# LEGACY
# for serial_data in row[7:]:
# try:
# for serial in serial_data.split('||'):
# ser, val = serial.split('\\')
# val = "" + val
# ret.update({ser: str(val)})
# except AttributeError:
# print('The Db structure is incorrect')
except TypeError:
                pass  # handled upon return (an unmatched log_id yields an empty, falsy dict)
return ret
else:
try:
x = list(Db.read_return(entries=100))
except TypeError:
x = ""
local_log = list()
for row in x:
row = list(row)
try:
ret = OrdDic()
ret.update({'logID': row[0]})
ret.update({'name': row[1]})
ret.update({'sender': row[2]})
ret.update({'receiver': row[3]})
ret.update({'time': fix_time(row[4])})
ret.update({'duty': row[5]})
ret.update({'net': row[6]})
ret.update(json.loads(row[7]))
# LEGACY
# for serial_data in row[7:]:
# try:
# for serial in serial_data.split('||'):
# ser, val = serial.split('\\')
# val = "" + val
# ret.update({ser: str(val)})
# except AttributeError:
# print('The Db structure is incorrect')
local_log.append(ret)
except TypeError:
print("none value in db")
return local_log
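    # Returns a single (possibly empty) dict when log_id is given, otherwise a list of
    # up to 100 of the most recent returns with their JSON serials merged back in.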
@staticmethod
def delete_log_byID(Db, id):
Db.delete_return_byID(id)
def fix_time(dtg):
    """ zero-pads a DTG that has lost its leading zero, e.g. 90545 -> '090545' """
    if len(str(dtg)) == 6:
        return str(dtg)
    else:
        return f'0{dtg}'
class SaveTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
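# Usage sketch (hypothetical interval): run backup() every 10 minutes until stopped.
#   autosave = SaveTimer(600, backup)
#   ...
#   autosave.stop()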
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
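# Note: unlike shutil.copytree, this copies the *contents* of src into an already
# existing dst; nested directories are still delegated to shutil.copytree.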
def backup():
files = glob("/Volumes/*")
rem = ["/Volumes/student", "/Volumes/com.apple.TimeMachine.localsnapshots", "/Volumes/Macintosh HD", "Blah"]
for path in rem:
try:
files.remove(path)
except ValueError:
pass
usb = None
for path in files:
if "CP" in path:
usb = os.path.join(path, "Backup")
break
else:
usb = None
print("No Backup USB found")
if usb:
# save to usb
print("Saving...", end=" ")
save(os.path.join(usb, "files"), "resources/files")
save(os.path.join(usb, "db"), "resources/static/db")
print("Saved")
def save(dest, src):
if not os.path.exists(dest):
os.makedirs(dest)
copytree(src, dest)
if __name__ == '__main__':
pass
# File.pre_merge()
# settings = File.read_settings()
# File.save_settings(settings)
# File.read_locations()
# File.read_callsigns()
# File.save_dic()
# File.read_dic()
# x = file()
# x.save()
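    # Example session (hypothetical, left commented out like the calls above):
    #   sets = File.read_settings()
    #   db = File.db_connect(sets)
    #   recent = File.load_log(db)          # most recent returns
    #   autosave = SaveTimer(600, backup)   # periodic USB backup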
|
flexible
|
{
"blob_id": "38bd9e5b2147838b6061925d72b989c83343f1c2",
"index": 9800,
"step-1": "<mask token>\n\n\nclass DbManager:\n\n def __init__(self, fname=None, tname=None):\n if fname:\n self.FILE_NAME = fname\n else:\n self.FILE_NAME = 'resources/static/LOG_Temp.db'\n if tname:\n self.TABLE_NAME = tname\n else:\n self.TABLE_NAME = \"'LOG_RETURNS'\"\n\n def query_data(self, conditions, entries):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n condition_order = ['logID', 'returnType', 'sender',\n 'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']\n cond_list = []\n cond_string_list = []\n for cond in condition_order:\n val = ''\n try:\n val = conditions[cond]\n except KeyError:\n val = ''\n if '|' in val:\n i = 0\n dep = list()\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['other']:\n cond = 'serials'\n val = conditions['other']\n if '|' in val:\n i = 0\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['logTimeFrom']:\n if conditions['logTimeTo']:\n cond_string_list.append('logTime>= ? AND logTime<= ?')\n cond_list.append(conditions['logTimeFrom'])\n cond_list.append(conditions['logTimeTo'])\n else:\n cond_string_list.append('logTime>= ?')\n cond_list.append(conditions['logTimeFrom'])\n elif conditions['logTimeTo']:\n cond_string_list.append('logTime <= ?')\n cond_list.append(conditions['logTimeTo'])\n cond_string = ' AND '.join(cond_string_list)\n print(cond_string)\n print(cond_list)\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'\n , cond_list)\n return results\n except sqlite3.OperationalError as e:\n print(e)\n\n def create_db(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n try:\n c.execute(\n f\"\"\"CREATE TABLE {self.TABLE_NAME} (\n `logID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `returnType`\ttext,\n `sender`\ttext,\n `reciever`\ttext,\n `logTime`\tinteger,\n `dutyOfficer`\ttext,\n `net`\tTEXT,\n `serials`\ttext\n );\"\"\"\n )\n conn.commit()\n except sqlite3.OperationalError:\n print('The Db already exists')\n if ret:\n return self.read_return()\n <mask token>\n <mask token>\n\n def new_return(self, lst):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute('INSERT INTO ' + self.TABLE_NAME +\n ' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)\n except sqlite3.OperationalError as e:\n print(e)\n \"\"\"\n if 'no such table' in str(e):\n if \"game\" in str(self.FILE_NAME):\n print(\"MEME\")\n self.create_game_table()\n else:\n self.create_db()\n self.new_return(lst)\n \"\"\"\n <mask token>\n\n def read_return(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'\n )\n else:\n results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in 
str(e):\n DbManager.create_db(self)\n <mask token>\n\n def find_index(self, log_id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'\n x = c.execute(sql_str, [str(log_id)])\n return x\n <mask token>\n\n def update_record(self, lst, logID):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n rowData = (\n 'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'\n )\n c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +\n ' WHERE logID=' + logID, lst)\n\n\nclass File:\n\n @staticmethod\n def db_connect(sets):\n try:\n fname = sets['DB_FILE_NAME']\n except KeyError:\n fname = None\n try:\n tname = sets['DB_TABLE_NAME']\n except KeyError:\n tname = None\n conn = DbManager(fname=fname, tname=tname)\n return conn\n\n @staticmethod\n def generate_css_min():\n MinifyFilesPre.min_css_file('resources/static/styles/main.css')\n\n @staticmethod\n def pre_merge(merge=False):\n if merge:\n tmp_file = MinifyFilesPre()\n tmp_file.js_merge()\n tmp_file.save()\n else:\n MinifyFilesPre.get_js_files()\n\n @staticmethod\n def get_first(self):\n return self.get_first_index()\n\n @staticmethod\n def save_dic(dic):\n \"\"\" Saves the given dictionary of serials to a file \"\"\"\n json.dump(dic, open('resources/files/serials.csv', 'w'))\n\n @staticmethod\n def read_dic():\n \"\"\" reads the dictionary of serials \"\"\"\n dic = OrdDic()\n dic.update(json.load(open('resources/files/serials.csv', 'r')))\n return dic\n\n @staticmethod\n def read_legacy():\n \"\"\" Depreciated reads the dictionary and returns it in the legacy format \"\"\"\n serials = File.read_dic()\n final_dic = OrdDic()\n for name, dic in serials.items():\n inner_dic = OrdDic()\n for serial in dic:\n inner_dic.update({serial: dic[serial]['desc']})\n final_dic.update({name: inner_dic})\n return final_dic\n\n @staticmethod\n def read_locations():\n \"\"\" reads the file containing the locations \"\"\"\n r = open('resources/files/locations.txt', 'r', newline='\\n')\n locations = r.read().split('\\n')\n return locations\n\n @staticmethod\n def save_Locations(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/locations.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def save_callsigns(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/callsigns.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def read_callsigns():\n \"\"\" reads the file containing the callsigns \"\"\"\n r = open('resources/files/callsigns.txt', 'r', newline='\\n')\n callsigns = r.read().split('\\n')\n return callsigns\n\n @staticmethod\n def read_settings():\n \"\"\" reads the settings from file \"\"\"\n settings = OrdDic()\n settings.update(json.load(open('resources/files/settings.txt', 'r')))\n return settings\n\n @staticmethod\n def save_settings(dic):\n \"\"\" saves the given settings (dictionary) to file \"\"\"\n json.dump(dic, open('resources/files/settings.txt', 'w'))\n\n @staticmethod\n def save_log(self, log, update=False):\n \"\"\" Saves the log to file \"\"\"\n main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']\n lst = []\n for key in main_keys:\n lst.append(log[key])\n log.pop(key)\n lst.append(json.dumps(log))\n if update:\n self.update_record(lst, log['logID'])\n else:\n self.new_return(lst)\n\n @staticmethod\n def load_log_query(Db, query):\n x = list(Db.query_data(query, 100))\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n ret.update({'logID': 
row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': row[4]})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def load_log(Db, log_id=None):\n \"\"\" loads the log file \"\"\"\n if log_id:\n row = Db.find_index(log_id).fetchone()\n local_log = list()\n ret = None\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n except TypeError:\n pass\n return ret\n else:\n try:\n x = list(Db.read_return(entries=100))\n except TypeError:\n x = ''\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def delete_log_byID(Db, id):\n Db.delete_return_byID(id)\n\n\n<mask token>\n\n\nclass SaveTimer(object):\n\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MinifyFilesPre:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass DbManager:\n\n def __init__(self, fname=None, tname=None):\n if fname:\n self.FILE_NAME = fname\n else:\n self.FILE_NAME = 'resources/static/LOG_Temp.db'\n if tname:\n self.TABLE_NAME = tname\n else:\n self.TABLE_NAME = \"'LOG_RETURNS'\"\n\n def query_data(self, conditions, entries):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n condition_order = ['logID', 'returnType', 'sender',\n 'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']\n cond_list = []\n cond_string_list = []\n for cond in condition_order:\n val = ''\n try:\n val = conditions[cond]\n except KeyError:\n val = ''\n if '|' in val:\n i = 0\n dep = list()\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['other']:\n cond = 'serials'\n val = conditions['other']\n if '|' in val:\n i = 0\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['logTimeFrom']:\n if conditions['logTimeTo']:\n cond_string_list.append('logTime>= ? AND logTime<= ?')\n cond_list.append(conditions['logTimeFrom'])\n cond_list.append(conditions['logTimeTo'])\n else:\n cond_string_list.append('logTime>= ?')\n cond_list.append(conditions['logTimeFrom'])\n elif conditions['logTimeTo']:\n cond_string_list.append('logTime <= ?')\n cond_list.append(conditions['logTimeTo'])\n cond_string = ' AND '.join(cond_string_list)\n print(cond_string)\n print(cond_list)\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'\n , cond_list)\n return results\n except sqlite3.OperationalError as e:\n print(e)\n\n def create_db(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n try:\n c.execute(\n f\"\"\"CREATE TABLE {self.TABLE_NAME} (\n `logID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `returnType`\ttext,\n `sender`\ttext,\n `reciever`\ttext,\n `logTime`\tinteger,\n `dutyOfficer`\ttext,\n `net`\tTEXT,\n `serials`\ttext\n );\"\"\"\n )\n conn.commit()\n except sqlite3.OperationalError:\n print('The Db already exists')\n if ret:\n return self.read_return()\n\n def count_records(self):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n results = c.execute(f\"SELECT COUNT('LogID') FROM {self.TABLE_NAME}\"\n )\n return results\n\n def create_game_table(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n try:\n c.execute(\n f\"\"\"CREATE TABLE `{self.TABLE_NAME}` (\n `GameID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `Name`\tTEXT DEFAULT '?',\n `Rank`\tTEXT DEFAULT '?',\n `Pl`\tTEXT DEFAULT '?',\n `Score`\tINTEGER DEFAULT 0,\n `Time`\tINTEGER\n );\"\"\"\n )\n conn.commit()\n except sqlite3.OperationalError:\n print('The Db already exists')\n if ret:\n return self.read_return()\n\n def new_return(self, lst):\n try:\n with 
sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute('INSERT INTO ' + self.TABLE_NAME +\n ' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)\n except sqlite3.OperationalError as e:\n print(e)\n \"\"\"\n if 'no such table' in str(e):\n if \"game\" in str(self.FILE_NAME):\n print(\"MEME\")\n self.create_game_table()\n else:\n self.create_db()\n self.new_return(lst)\n \"\"\"\n\n def delete_return_byID(self, id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute(f'DELETE FROM {self.TABLE_NAME} WHERE logID = {id}')\n\n def read_return(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'\n )\n else:\n results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_db(self)\n\n def read_game_score(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}'\n )\n else:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_game_table(self)\n\n def find_index(self, log_id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'\n x = c.execute(sql_str, [str(log_id)])\n return x\n\n def get_first_index(self):\n with sqlite3.connect(self.FILE_NAME) as conn:\n i = ''\n c = conn.cursor()\n sqlStr = ('SELECT logID FROM ' + self.TABLE_NAME +\n ' WHERE logID = (SELECT MAX(logID) FROM ' + self.\n TABLE_NAME + ')')\n x = c.execute(sqlStr)\n for i in x:\n i = int(list(i)[0])\n try:\n return i\n except UnboundLocalError:\n return ''\n\n def update_record(self, lst, logID):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n rowData = (\n 'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'\n )\n c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +\n ' WHERE logID=' + logID, lst)\n\n\nclass File:\n\n @staticmethod\n def db_connect(sets):\n try:\n fname = sets['DB_FILE_NAME']\n except KeyError:\n fname = None\n try:\n tname = sets['DB_TABLE_NAME']\n except KeyError:\n tname = None\n conn = DbManager(fname=fname, tname=tname)\n return conn\n\n @staticmethod\n def generate_css_min():\n MinifyFilesPre.min_css_file('resources/static/styles/main.css')\n\n @staticmethod\n def pre_merge(merge=False):\n if merge:\n tmp_file = MinifyFilesPre()\n tmp_file.js_merge()\n tmp_file.save()\n else:\n MinifyFilesPre.get_js_files()\n\n @staticmethod\n def get_first(self):\n return self.get_first_index()\n\n @staticmethod\n def save_dic(dic):\n \"\"\" Saves the given dictionary of serials to a file \"\"\"\n json.dump(dic, open('resources/files/serials.csv', 'w'))\n\n @staticmethod\n def read_dic():\n \"\"\" reads the dictionary of serials \"\"\"\n dic = OrdDic()\n dic.update(json.load(open('resources/files/serials.csv', 'r')))\n return dic\n\n @staticmethod\n def read_legacy():\n \"\"\" Depreciated reads the dictionary and returns it in the legacy format \"\"\"\n serials = File.read_dic()\n final_dic = OrdDic()\n for name, dic in serials.items():\n inner_dic = OrdDic()\n for serial in dic:\n inner_dic.update({serial: 
dic[serial]['desc']})\n final_dic.update({name: inner_dic})\n return final_dic\n\n @staticmethod\n def read_locations():\n \"\"\" reads the file containing the locations \"\"\"\n r = open('resources/files/locations.txt', 'r', newline='\\n')\n locations = r.read().split('\\n')\n return locations\n\n @staticmethod\n def save_Locations(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/locations.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def save_callsigns(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/callsigns.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def read_callsigns():\n \"\"\" reads the file containing the callsigns \"\"\"\n r = open('resources/files/callsigns.txt', 'r', newline='\\n')\n callsigns = r.read().split('\\n')\n return callsigns\n\n @staticmethod\n def read_settings():\n \"\"\" reads the settings from file \"\"\"\n settings = OrdDic()\n settings.update(json.load(open('resources/files/settings.txt', 'r')))\n return settings\n\n @staticmethod\n def save_settings(dic):\n \"\"\" saves the given settings (dictionary) to file \"\"\"\n json.dump(dic, open('resources/files/settings.txt', 'w'))\n\n @staticmethod\n def save_log(self, log, update=False):\n \"\"\" Saves the log to file \"\"\"\n main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']\n lst = []\n for key in main_keys:\n lst.append(log[key])\n log.pop(key)\n lst.append(json.dumps(log))\n if update:\n self.update_record(lst, log['logID'])\n else:\n self.new_return(lst)\n\n @staticmethod\n def load_log_query(Db, query):\n x = list(Db.query_data(query, 100))\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': row[4]})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def load_log(Db, log_id=None):\n \"\"\" loads the log file \"\"\"\n if log_id:\n row = Db.find_index(log_id).fetchone()\n local_log = list()\n ret = None\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n except TypeError:\n pass\n return ret\n else:\n try:\n x = list(Db.read_return(entries=100))\n except TypeError:\n x = ''\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def delete_log_byID(Db, id):\n Db.delete_return_byID(id)\n\n\n<mask token>\n\n\nclass SaveTimer(object):\n\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not 
self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MinifyFilesPre:\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def min_js_file(file_name):\n js = jsmin(open(file_name, newline='\\n').read())\n open(file_name, 'w', newline='\\n').write(js)\n\n @staticmethod\n def min_css_file(file_name):\n css = compress(open(file_name, newline='\\n').read())\n open(file_name[:-4] + '.min.css', 'w', newline='\\n').write(css)\n <mask token>\n\n\nclass DbManager:\n\n def __init__(self, fname=None, tname=None):\n if fname:\n self.FILE_NAME = fname\n else:\n self.FILE_NAME = 'resources/static/LOG_Temp.db'\n if tname:\n self.TABLE_NAME = tname\n else:\n self.TABLE_NAME = \"'LOG_RETURNS'\"\n\n def query_data(self, conditions, entries):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n condition_order = ['logID', 'returnType', 'sender',\n 'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']\n cond_list = []\n cond_string_list = []\n for cond in condition_order:\n val = ''\n try:\n val = conditions[cond]\n except KeyError:\n val = ''\n if '|' in val:\n i = 0\n dep = list()\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['other']:\n cond = 'serials'\n val = conditions['other']\n if '|' in val:\n i = 0\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['logTimeFrom']:\n if conditions['logTimeTo']:\n cond_string_list.append('logTime>= ? 
AND logTime<= ?')\n cond_list.append(conditions['logTimeFrom'])\n cond_list.append(conditions['logTimeTo'])\n else:\n cond_string_list.append('logTime>= ?')\n cond_list.append(conditions['logTimeFrom'])\n elif conditions['logTimeTo']:\n cond_string_list.append('logTime <= ?')\n cond_list.append(conditions['logTimeTo'])\n cond_string = ' AND '.join(cond_string_list)\n print(cond_string)\n print(cond_list)\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'\n , cond_list)\n return results\n except sqlite3.OperationalError as e:\n print(e)\n\n def create_db(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n try:\n c.execute(\n f\"\"\"CREATE TABLE {self.TABLE_NAME} (\n `logID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `returnType`\ttext,\n `sender`\ttext,\n `reciever`\ttext,\n `logTime`\tinteger,\n `dutyOfficer`\ttext,\n `net`\tTEXT,\n `serials`\ttext\n );\"\"\"\n )\n conn.commit()\n except sqlite3.OperationalError:\n print('The Db already exists')\n if ret:\n return self.read_return()\n\n def count_records(self):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n results = c.execute(f\"SELECT COUNT('LogID') FROM {self.TABLE_NAME}\"\n )\n return results\n\n def create_game_table(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n try:\n c.execute(\n f\"\"\"CREATE TABLE `{self.TABLE_NAME}` (\n `GameID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `Name`\tTEXT DEFAULT '?',\n `Rank`\tTEXT DEFAULT '?',\n `Pl`\tTEXT DEFAULT '?',\n `Score`\tINTEGER DEFAULT 0,\n `Time`\tINTEGER\n );\"\"\"\n )\n conn.commit()\n except sqlite3.OperationalError:\n print('The Db already exists')\n if ret:\n return self.read_return()\n\n def new_return(self, lst):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute('INSERT INTO ' + self.TABLE_NAME +\n ' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)\n except sqlite3.OperationalError as e:\n print(e)\n \"\"\"\n if 'no such table' in str(e):\n if \"game\" in str(self.FILE_NAME):\n print(\"MEME\")\n self.create_game_table()\n else:\n self.create_db()\n self.new_return(lst)\n \"\"\"\n\n def delete_return_byID(self, id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute(f'DELETE FROM {self.TABLE_NAME} WHERE logID = {id}')\n\n def read_return(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'\n )\n else:\n results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_db(self)\n\n def read_game_score(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}'\n )\n else:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_game_table(self)\n\n def find_index(self, log_id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'\n x = c.execute(sql_str, [str(log_id)])\n return x\n\n def get_first_index(self):\n with 
sqlite3.connect(self.FILE_NAME) as conn:\n i = ''\n c = conn.cursor()\n sqlStr = ('SELECT logID FROM ' + self.TABLE_NAME +\n ' WHERE logID = (SELECT MAX(logID) FROM ' + self.\n TABLE_NAME + ')')\n x = c.execute(sqlStr)\n for i in x:\n i = int(list(i)[0])\n try:\n return i\n except UnboundLocalError:\n return ''\n\n def update_record(self, lst, logID):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n rowData = (\n 'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'\n )\n c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +\n ' WHERE logID=' + logID, lst)\n\n\nclass File:\n\n @staticmethod\n def db_connect(sets):\n try:\n fname = sets['DB_FILE_NAME']\n except KeyError:\n fname = None\n try:\n tname = sets['DB_TABLE_NAME']\n except KeyError:\n tname = None\n conn = DbManager(fname=fname, tname=tname)\n return conn\n\n @staticmethod\n def generate_css_min():\n MinifyFilesPre.min_css_file('resources/static/styles/main.css')\n\n @staticmethod\n def pre_merge(merge=False):\n if merge:\n tmp_file = MinifyFilesPre()\n tmp_file.js_merge()\n tmp_file.save()\n else:\n MinifyFilesPre.get_js_files()\n\n @staticmethod\n def get_first(self):\n return self.get_first_index()\n\n @staticmethod\n def save_dic(dic):\n \"\"\" Saves the given dictionary of serials to a file \"\"\"\n json.dump(dic, open('resources/files/serials.csv', 'w'))\n\n @staticmethod\n def read_dic():\n \"\"\" reads the dictionary of serials \"\"\"\n dic = OrdDic()\n dic.update(json.load(open('resources/files/serials.csv', 'r')))\n return dic\n\n @staticmethod\n def read_legacy():\n \"\"\" Depreciated reads the dictionary and returns it in the legacy format \"\"\"\n serials = File.read_dic()\n final_dic = OrdDic()\n for name, dic in serials.items():\n inner_dic = OrdDic()\n for serial in dic:\n inner_dic.update({serial: dic[serial]['desc']})\n final_dic.update({name: inner_dic})\n return final_dic\n\n @staticmethod\n def read_locations():\n \"\"\" reads the file containing the locations \"\"\"\n r = open('resources/files/locations.txt', 'r', newline='\\n')\n locations = r.read().split('\\n')\n return locations\n\n @staticmethod\n def save_Locations(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/locations.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def save_callsigns(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/callsigns.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def read_callsigns():\n \"\"\" reads the file containing the callsigns \"\"\"\n r = open('resources/files/callsigns.txt', 'r', newline='\\n')\n callsigns = r.read().split('\\n')\n return callsigns\n\n @staticmethod\n def read_settings():\n \"\"\" reads the settings from file \"\"\"\n settings = OrdDic()\n settings.update(json.load(open('resources/files/settings.txt', 'r')))\n return settings\n\n @staticmethod\n def save_settings(dic):\n \"\"\" saves the given settings (dictionary) to file \"\"\"\n json.dump(dic, open('resources/files/settings.txt', 'w'))\n\n @staticmethod\n def save_log(self, log, update=False):\n \"\"\" Saves the log to file \"\"\"\n main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']\n lst = []\n for key in main_keys:\n lst.append(log[key])\n log.pop(key)\n lst.append(json.dumps(log))\n if update:\n self.update_record(lst, log['logID'])\n else:\n self.new_return(lst)\n\n @staticmethod\n def load_log_query(Db, query):\n x = list(Db.query_data(query, 100))\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n 
ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': row[4]})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def load_log(Db, log_id=None):\n \"\"\" loads the log file \"\"\"\n if log_id:\n row = Db.find_index(log_id).fetchone()\n local_log = list()\n ret = None\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n except TypeError:\n pass\n return ret\n else:\n try:\n x = list(Db.read_return(entries=100))\n except TypeError:\n x = ''\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def delete_log_byID(Db, id):\n Db.delete_return_byID(id)\n\n\n<mask token>\n\n\nclass SaveTimer(object):\n\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MinifyFilesPre:\n\n def __init__(self, merge=False):\n file_names = glob('resources/static/js_files/*.js')\n file_names.remove('resources/static/js_files/full_version.js')\n self.file_names = file_names\n self.merge = merge\n self.js = ''\n\n def save(self):\n \"\"\"combines several js files together, with optional minification\"\"\"\n with open('resources/static/js_files/full_version.js', 'w', newline\n ='\\n') as w:\n w.write(self.js)\n\n def js_merge(self):\n \"\"\"saves minified version to a single one\"\"\"\n if self.merge:\n js = ''\n for file_name in self.file_names:\n try:\n js += jsmin(open(file_name, newline='\\n').read())\n except FileNotFoundError:\n print(f'The file {file_name} could not be found')\n self.js = jsmin(js)\n else:\n for file_name in self.file_names:\n js = jsmin(open(file_name, newline='\\n').read())\n open(file_name, 'w', newline='\\n').write(js)\n\n @staticmethod\n def min_js_file(file_name):\n js = jsmin(open(file_name, newline='\\n').read())\n open(file_name, 'w', newline='\\n').write(js)\n\n @staticmethod\n def min_css_file(file_name):\n css = compress(open(file_name, newline='\\n').read())\n open(file_name[:-4] + '.min.css', 'w', newline='\\n').write(css)\n\n @staticmethod\n def get_js_files():\n file_names = glob('resources/static/js_files/*.js')\n file_names.remove('resources/static/js_files/full_version.js')\n\n\nclass DbManager:\n\n def __init__(self, fname=None, tname=None):\n if fname:\n self.FILE_NAME = fname\n else:\n self.FILE_NAME = 'resources/static/LOG_Temp.db'\n if tname:\n self.TABLE_NAME = tname\n else:\n self.TABLE_NAME = \"'LOG_RETURNS'\"\n\n def query_data(self, conditions, entries):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n condition_order = ['logID', 'returnType', 'sender',\n 'reciever', 'logTime', 'dutyOfficer', 'net', 'serials']\n cond_list = []\n cond_string_list = []\n for cond in condition_order:\n val = ''\n try:\n val = conditions[cond]\n except KeyError:\n val = ''\n if '|' in val:\n i = 0\n dep = list()\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['other']:\n cond = 'serials'\n val = conditions['other']\n if '|' in val:\n i = 0\n for sub_val in val.split('|'):\n i += 1\n cond_list.append(f'%{sub_val}%')\n cond_string_list.append('(' +\n f'lower({cond}) LIKE ?' + \n f' OR lower({cond}) LIKE ?' * (i - 1) + ')')\n else:\n for sub_val in val.split(', '):\n cond_string_list.append(f'lower({cond}) LIKE ?')\n sub_val = f'%{sub_val.lower()}%'\n cond_list.append(sub_val)\n if conditions['logTimeFrom']:\n if conditions['logTimeTo']:\n cond_string_list.append('logTime>= ? 
AND logTime<= ?')\n cond_list.append(conditions['logTimeFrom'])\n cond_list.append(conditions['logTimeTo'])\n else:\n cond_string_list.append('logTime>= ?')\n cond_list.append(conditions['logTimeFrom'])\n elif conditions['logTimeTo']:\n cond_string_list.append('logTime <= ?')\n cond_list.append(conditions['logTimeTo'])\n cond_string = ' AND '.join(cond_string_list)\n print(cond_string)\n print(cond_list)\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} WHERE {cond_string} ORDER BY logID DESC LIMIT {entries}'\n , cond_list)\n return results\n except sqlite3.OperationalError as e:\n print(e)\n\n def create_db(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n try:\n c.execute(\n f\"\"\"CREATE TABLE {self.TABLE_NAME} (\n `logID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `returnType`\ttext,\n `sender`\ttext,\n `reciever`\ttext,\n `logTime`\tinteger,\n `dutyOfficer`\ttext,\n `net`\tTEXT,\n `serials`\ttext\n );\"\"\"\n )\n conn.commit()\n except sqlite3.OperationalError:\n print('The Db already exists')\n if ret:\n return self.read_return()\n\n def count_records(self):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n results = c.execute(f\"SELECT COUNT('LogID') FROM {self.TABLE_NAME}\"\n )\n return results\n\n def create_game_table(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n try:\n c.execute(\n f\"\"\"CREATE TABLE `{self.TABLE_NAME}` (\n `GameID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `Name`\tTEXT DEFAULT '?',\n `Rank`\tTEXT DEFAULT '?',\n `Pl`\tTEXT DEFAULT '?',\n `Score`\tINTEGER DEFAULT 0,\n `Time`\tINTEGER\n );\"\"\"\n )\n conn.commit()\n except sqlite3.OperationalError:\n print('The Db already exists')\n if ret:\n return self.read_return()\n\n def new_return(self, lst):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute('INSERT INTO ' + self.TABLE_NAME +\n ' VALUES (NULL,' + '?, ' * (len(lst) - 1) + '?)', lst)\n except sqlite3.OperationalError as e:\n print(e)\n \"\"\"\n if 'no such table' in str(e):\n if \"game\" in str(self.FILE_NAME):\n print(\"MEME\")\n self.create_game_table()\n else:\n self.create_db()\n self.new_return(lst)\n \"\"\"\n\n def delete_return_byID(self, id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute(f'DELETE FROM {self.TABLE_NAME} WHERE logID = {id}')\n\n def read_return(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}'\n )\n else:\n results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_db(self)\n\n def read_game_score(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}'\n )\n else:\n results = c.execute(\n f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_game_table(self)\n\n def find_index(self, log_id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n sql_str = 'SELECT * FROM ' + self.TABLE_NAME + ' WHERE logID=?'\n x = c.execute(sql_str, [str(log_id)])\n return x\n\n def get_first_index(self):\n with 
sqlite3.connect(self.FILE_NAME) as conn:\n i = ''\n c = conn.cursor()\n sqlStr = ('SELECT logID FROM ' + self.TABLE_NAME +\n ' WHERE logID = (SELECT MAX(logID) FROM ' + self.\n TABLE_NAME + ')')\n x = c.execute(sqlStr)\n for i in x:\n i = int(list(i)[0])\n try:\n return i\n except UnboundLocalError:\n return ''\n\n def update_record(self, lst, logID):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n rowData = (\n 'returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?'\n )\n c.execute('UPDATE ' + self.TABLE_NAME + ' SET ' + rowData +\n ' WHERE logID=' + logID, lst)\n\n\nclass File:\n\n @staticmethod\n def db_connect(sets):\n try:\n fname = sets['DB_FILE_NAME']\n except KeyError:\n fname = None\n try:\n tname = sets['DB_TABLE_NAME']\n except KeyError:\n tname = None\n conn = DbManager(fname=fname, tname=tname)\n return conn\n\n @staticmethod\n def generate_css_min():\n MinifyFilesPre.min_css_file('resources/static/styles/main.css')\n\n @staticmethod\n def pre_merge(merge=False):\n if merge:\n tmp_file = MinifyFilesPre()\n tmp_file.js_merge()\n tmp_file.save()\n else:\n MinifyFilesPre.get_js_files()\n\n @staticmethod\n def get_first(self):\n return self.get_first_index()\n\n @staticmethod\n def save_dic(dic):\n \"\"\" Saves the given dictionary of serials to a file \"\"\"\n json.dump(dic, open('resources/files/serials.csv', 'w'))\n\n @staticmethod\n def read_dic():\n \"\"\" reads the dictionary of serials \"\"\"\n dic = OrdDic()\n dic.update(json.load(open('resources/files/serials.csv', 'r')))\n return dic\n\n @staticmethod\n def read_legacy():\n \"\"\" Depreciated reads the dictionary and returns it in the legacy format \"\"\"\n serials = File.read_dic()\n final_dic = OrdDic()\n for name, dic in serials.items():\n inner_dic = OrdDic()\n for serial in dic:\n inner_dic.update({serial: dic[serial]['desc']})\n final_dic.update({name: inner_dic})\n return final_dic\n\n @staticmethod\n def read_locations():\n \"\"\" reads the file containing the locations \"\"\"\n r = open('resources/files/locations.txt', 'r', newline='\\n')\n locations = r.read().split('\\n')\n return locations\n\n @staticmethod\n def save_Locations(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/locations.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def save_callsigns(lst):\n lst = '\\n'.join(lst)\n w = open('resources/files/callsigns.txt', 'w', newline='\\n')\n w.write(lst)\n\n @staticmethod\n def read_callsigns():\n \"\"\" reads the file containing the callsigns \"\"\"\n r = open('resources/files/callsigns.txt', 'r', newline='\\n')\n callsigns = r.read().split('\\n')\n return callsigns\n\n @staticmethod\n def read_settings():\n \"\"\" reads the settings from file \"\"\"\n settings = OrdDic()\n settings.update(json.load(open('resources/files/settings.txt', 'r')))\n return settings\n\n @staticmethod\n def save_settings(dic):\n \"\"\" saves the given settings (dictionary) to file \"\"\"\n json.dump(dic, open('resources/files/settings.txt', 'w'))\n\n @staticmethod\n def save_log(self, log, update=False):\n \"\"\" Saves the log to file \"\"\"\n main_keys = ['name', 'sender', 'receiver', 'time', 'duty', 'net']\n lst = []\n for key in main_keys:\n lst.append(log[key])\n log.pop(key)\n lst.append(json.dumps(log))\n if update:\n self.update_record(lst, log['logID'])\n else:\n self.new_return(lst)\n\n @staticmethod\n def load_log_query(Db, query):\n x = list(Db.query_data(query, 100))\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n 
ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': row[4]})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def load_log(Db, log_id=None):\n \"\"\" loads the log file \"\"\"\n if log_id:\n row = Db.find_index(log_id).fetchone()\n local_log = list()\n ret = None\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n except TypeError:\n pass\n return ret\n else:\n try:\n x = list(Db.read_return(entries=100))\n except TypeError:\n x = ''\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n ret.update(json.loads(row[7]))\n local_log.append(ret)\n except TypeError:\n print('none value in db')\n return local_log\n\n @staticmethod\n def delete_log_byID(Db, id):\n Db.delete_return_byID(id)\n\n\ndef fix_time(dtg):\n if len(str(dtg)) == 6:\n return str(dtg)\n else:\n return str(f'0{dtg}')\n\n\nclass SaveTimer(object):\n\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n\n\n<mask token>\n",
"step-5": "from csv import reader, writer\nfrom collections import OrderedDict as OrdDic\nimport sqlite3\nfrom jsmin import jsmin\nfrom glob import glob\nfrom csscompressor import compress\nfrom threading import Timer\nfrom glob import glob\nimport os\nimport shutil\nimport logging\nimport json\n\nclass MinifyFilesPre:\n def __init__(self, merge=False):\n\n file_names = glob(\"resources/static/js_files/*.js\")\n file_names.remove(\"resources/static/js_files/full_version.js\")\n self.file_names = file_names\n self.merge = merge\n self.js = \"\"\n\n def save(self):\n \"\"\"combines several js files together, with optional minification\"\"\"\n with open(\"resources/static/js_files/full_version.js\", 'w', newline=\"\\n\") as w:\n w.write(self.js)\n\n def js_merge(self):\n \"\"\"saves minified version to a single one\"\"\"\n if self.merge:\n js = \"\"\n for file_name in self.file_names:\n try:\n js += jsmin(open(file_name, newline=\"\\n\").read())\n except FileNotFoundError:\n print(f\"The file {file_name} could not be found\")\n self.js = jsmin(js)\n\n else:\n for file_name in self.file_names:\n js = jsmin(open(file_name, newline=\"\\n\").read())\n open(file_name, 'w', newline=\"\\n\").write(js)\n\n @staticmethod\n def min_js_file(file_name):\n js = jsmin(open(file_name, newline=\"\\n\").read())\n open(file_name, 'w', newline=\"\\n\").write(js)\n\n @staticmethod\n def min_css_file(file_name):\n css = compress(open(file_name, newline=\"\\n\").read())\n open(file_name[:-4] + '.min.css', 'w', newline=\"\\n\").write(css)\n\n @staticmethod\n def get_js_files():\n file_names = glob(\"resources/static/js_files/*.js\")\n file_names.remove(\"resources/static/js_files/full_version.js\")\n\n\nclass DbManager:\n\n def __init__(self, fname=None, tname=None):\n if fname:\n self.FILE_NAME = fname\n else:\n self.FILE_NAME = 'resources/static/LOG_Temp.db'\n\n if tname:\n self.TABLE_NAME = tname\n else:\n self.TABLE_NAME = \"'LOG_RETURNS'\"\n\n\n def query_data(self, conditions, entries):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n condition_order = ['logID',\n 'returnType',\n 'sender',\n 'reciever',\n 'logTime',\n 'dutyOfficer',\n 'net',\n 'serials']\n\n cond_list = []\n cond_string_list = []\n for cond in condition_order:\n val = \"\"\n try:\n val = conditions[cond]\n except KeyError:\n val = \"\"\n\n\n if \"|\" in val:\n i = 0\n dep = list()\n for sub_val in val.split(\"|\"):\n i+=1\n cond_list.append(f\"%{sub_val}%\")\n\n cond_string_list.append(\"(\"+f\"lower({cond}) LIKE ?\"+ f\" OR lower({cond}) LIKE ?\"*(i-1)+\")\")\n\n else:\n for sub_val in val.split(\", \"):\n cond_string_list.append(f\"lower({cond}) LIKE ?\")\n sub_val = f\"%{sub_val.lower()}%\"\n cond_list.append(sub_val)\n\n\n if conditions['other']:\n cond = \"serials\"\n val = conditions['other']\n if \"|\" in val:\n i = 0\n for sub_val in val.split(\"|\"):\n i+=1\n cond_list.append(f\"%{sub_val}%\")\n\n cond_string_list.append(\"(\"+f\"lower({cond}) LIKE ?\"+ f\" OR lower({cond}) LIKE ?\"*(i-1)+\")\")\n\n else:\n for sub_val in val.split(\", \"):\n cond_string_list.append(f\"lower({cond}) LIKE ?\")\n sub_val = f\"%{sub_val.lower()}%\"\n cond_list.append(sub_val)\n\n if conditions['logTimeFrom']:\n if conditions['logTimeTo']:\n cond_string_list.append(\"logTime>= ? 
AND logTime<= ?\")\n cond_list.append(conditions['logTimeFrom'])\n cond_list.append(conditions['logTimeTo'])\n else:\n cond_string_list.append(\"logTime>= ?\")\n cond_list.append(conditions['logTimeFrom'])\n elif conditions['logTimeTo']:\n cond_string_list.append(\"logTime <= ?\")\n cond_list.append(conditions['logTimeTo'])\n\n cond_string = ' AND '.join(cond_string_list)\n\n print(cond_string)\n print(cond_list)\n\n results = c.execute(f\"SELECT * FROM {self.TABLE_NAME} WHERE \"\n f\"{cond_string}\"\n f\" ORDER BY logID DESC LIMIT {entries}\", cond_list)\n return results\n\n except sqlite3.OperationalError as e:\n print(e)\n\n def create_db(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n # Create table\n try:\n c.execute(f'''CREATE TABLE {self.TABLE_NAME} (\n `logID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `returnType`\ttext,\n `sender`\ttext,\n `reciever`\ttext,\n `logTime`\tinteger,\n `dutyOfficer`\ttext,\n `net`\tTEXT,\n `serials`\ttext\n );''')\n\n conn.commit()\n\n except sqlite3.OperationalError:\n print(\"The Db already exists\")\n\n if ret:\n return self.read_return()\n\n def count_records(self):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n results = c.execute(f\"SELECT COUNT('LogID') FROM {self.TABLE_NAME}\")\n return results\n\n\n def create_game_table(self, ret=False):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n # Create table\n try:\n c.execute(f'''CREATE TABLE `{self.TABLE_NAME}` (\n `GameID`\tINTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,\n `Name`\tTEXT DEFAULT '?',\n `Rank`\tTEXT DEFAULT '?',\n `Pl`\tTEXT DEFAULT '?',\n `Score`\tINTEGER DEFAULT 0,\n `Time`\tINTEGER\n );''')\n\n conn.commit()\n\n except sqlite3.OperationalError:\n print(\"The Db already exists\")\n\n if ret:\n return self.read_return()\n\n\n def new_return(self, lst):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute(\n 'INSERT INTO ' + self.TABLE_NAME + ' VALUES (NULL,' +\n '?, ' * (len(lst) - 1) + '?)',\n lst)\n except sqlite3.OperationalError as e:\n print(e)\n \"\"\"\n if 'no such table' in str(e):\n if \"game\" in str(self.FILE_NAME):\n print(\"MEME\")\n self.create_game_table()\n else:\n self.create_db()\n self.new_return(lst)\n \"\"\"\n\n\n def delete_return_byID(self, id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n c.execute(f\"DELETE FROM {self.TABLE_NAME} WHERE logID = {id}\")\n\n\n def read_return(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(f\"SELECT * FROM {self.TABLE_NAME} ORDER BY logID DESC LIMIT {entries}\")\n else:\n # should not be used but just here just in case\n results = c.execute(f'SELECT * FROM {self.TABLE_NAME}')\n\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_db(self)\n\n def read_game_score(self, entries=None):\n try:\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n if entries:\n results = c.execute(f\"SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC LIMIT {entries}\")\n else:\n # should not be used but just here just in case\n results = c.execute(f'SELECT * FROM {self.TABLE_NAME} ORDER BY Score DESC')\n\n return results\n except sqlite3.OperationalError as e:\n if 'no such table' in str(e):\n DbManager.create_game_table(self)\n\n\n def find_index(self, log_id):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n sql_str = 
(\"\"\"SELECT * FROM \"\"\" +\n self.TABLE_NAME +\n \"\"\" WHERE logID=?\"\"\")\n x = c.execute(sql_str, [str(log_id)])\n return x\n\n\n def get_first_index(self):\n\n with sqlite3.connect(self.FILE_NAME) as conn:\n i=\"\"\n c = conn.cursor()\n sqlStr = (\"\"\"SELECT logID FROM \"\"\" +\n self.TABLE_NAME +\n \"\"\" WHERE logID = (SELECT MAX(logID) FROM \"\"\" +\n self.TABLE_NAME + \")\")\n x = c.execute(sqlStr)\n for i in x:\n i = int(list(i)[0])\n try:\n return i\n except UnboundLocalError:\n return \"\"\n\n def update_record(self, lst, logID):\n with sqlite3.connect(self.FILE_NAME) as conn:\n c = conn.cursor()\n rowData = \"\"\"returnType=?, sender=?, reciever=?, logTime=?, dutyOfficer=?, net=?, serials=?\"\"\"\n c.execute(\n 'UPDATE ' + self.TABLE_NAME + ' SET ' + rowData + ' WHERE logID=' + logID,\n lst)\n\n\nclass File:\n\n @staticmethod\n def db_connect(sets):\n try:\n fname = sets['DB_FILE_NAME']\n except KeyError:\n fname = None\n\n try:\n tname = sets['DB_TABLE_NAME']\n except KeyError:\n tname = None\n\n conn = DbManager(fname=fname, tname=tname)\n return conn\n\n\n @staticmethod\n def generate_css_min():\n MinifyFilesPre.min_css_file('resources/static/styles/main.css')\n\n @staticmethod\n def pre_merge(merge=False):\n\n if merge:\n tmp_file = MinifyFilesPre()\n tmp_file.js_merge()\n tmp_file.save()\n else:\n MinifyFilesPre.get_js_files()\n\n @staticmethod\n def get_first(self):\n return self.get_first_index()\n\n @staticmethod\n def save_dic(dic):\n \"\"\" Saves the given dictionary of serials to a file \"\"\"\n json.dump(dic, open(\"resources/files/serials.csv\", \"w\"))\n\n\n # w = writer(open(\"resources/files/serials.csv\", \"w\", newline=\"\\n\"))\n # w.writerow(['Return Name', 'Serials'])\n # for name, serials in dic.items():\n # lst = []\n # if name == \"Return Name\":\n # lst.append(name)\n # lst.append(serials)\n # else:\n # for serial in serials:\n # if serial == \"Return Name\":\n # lst.append(serials)\n # else:\n # inner_lst = []\n # for cont in serials[serial]:\n # if cont == \"options\":\n # inner_lst.append(cont + \";;@@;;\" +\n # \";;##;;\".join(\n # serials\n # [serial]\n # [\"options\"]))\n # else:\n # inner_lst.append(\n # cont + \";;@@;;\" + serials[serial][cont])\n # lst.append(serial + ';;:::;;' + \";;!!!;;\".join(inner_lst))\n # w.writerow([(name), (';;,,,;;'.join(lst))])\n\n @staticmethod\n def read_dic():\n \"\"\" reads the dictionary of serials \"\"\"\n # should return the original format\n dic = OrdDic()\n dic.update(json.load(open(\"resources/files/serials.csv\", \"r\")))\n\n\n # OLD CODE\n # logging.log(logging.INFO, \"File path: \"+os.path.realpath(__file__))\n # r = reader(open(\"resources/files/serials.csv\", \"r\", newline=\"\\n\"))\n # i = 0\n # for row in r:\n # if i:\n # inner_dic = OrdDic()\n # for serial in row[1].split(';;,,,;;'):\n # serial = serial.split(';;:::;;')\n # sub_dic = OrdDic()\n # for sub_serial in serial[1].split(';;!!!;;'):\n # sub_serial = sub_serial.split(\";;@@;;\")\n # if sub_serial[0] == 'options':\n # options = sub_serial[1].split(\";;##;;\")\n # sub_dic.update({sub_serial[0]: options})\n # else:\n # sub_dic.update(\n # {sub_serial[0]: sub_serial[1]})\n # inner_dic.update({serial[0]: sub_dic})\n # # lst = row[1].split('\\\\')\n # dic.update({row[0]: inner_dic})\n # else:\n # i += 1\n # # print(\" * Read Dictionary\")\n return dic\n\n @staticmethod\n def read_legacy():\n \"\"\" Depreciated reads the dictionary and returns it in the legacy format \"\"\"\n serials = File.read_dic()\n final_dic = OrdDic()\n for name, dic in 
serials.items():\n inner_dic = OrdDic()\n for serial in dic:\n inner_dic.update({serial: dic[serial]['desc']})\n final_dic.update({name: inner_dic})\n return final_dic\n\n @staticmethod\n def read_locations():\n \"\"\" reads the file containing the locations \"\"\"\n r = open(\"resources/files/locations.txt\", \"r\", newline=\"\\n\")\n locations = r.read().split(\"\\n\")\n return locations\n\n @staticmethod\n def save_Locations(lst):\n lst = '\\n'.join(lst)\n w = open(\"resources/files/locations.txt\", \"w\", newline=\"\\n\")\n w.write(lst)\n\n @staticmethod\n def save_callsigns(lst):\n lst = '\\n'.join(lst)\n w = open(\"resources/files/callsigns.txt\", \"w\", newline=\"\\n\")\n w.write(lst)\n\n @staticmethod\n def read_callsigns():\n \"\"\" reads the file containing the callsigns \"\"\"\n r = open(\"resources/files/callsigns.txt\", \"r\", newline=\"\\n\")\n callsigns = r.read().split(\"\\n\")\n return callsigns\n\n @staticmethod\n def read_settings():\n \"\"\" reads the settings from file \"\"\"\n \n settings = OrdDic()\n settings.update(json.load(open(\"resources/files/settings.txt\", \"r\")))\n\n ## OLD WAY BELOW\n\n #r = open(\"resources/files/settings.txt\", \"r\", newline=\"\\n\")\n # for option in r.read().split('\\n'):\n # try:\n # #option = option.split('\\\\')\n # #settings.update({option[0]: option[1]})\n # # settings.update(json.loads(option))\n # except IndexError:\n # pass\n return settings\n\n @staticmethod\n def save_settings(dic):\n \"\"\" saves the given settings (dictionary) to file \"\"\"\n json.dump(dic, open(\"resources/files/settings.txt\", \"w\"))\n\n # LEGACY\n # with open(\"resources/files/settings.txt\", \"w\", newline=\"\\n\") as w:\n # for sett, val in dic.items():\n # w.write(sett + '\\\\' + val + '\\n')\n\n @staticmethod\n def save_log(self, log, update=False):\n \"\"\" Saves the log to file \"\"\"\n\n main_keys = [\n 'name',\n 'sender',\n 'receiver',\n 'time',\n 'duty',\n 'net'\n ]\n\n # print(test)\n\n lst = []\n for key in main_keys:\n # print(key)\n lst.append(log[key])\n log.pop(key)\n \n # LEGACY\n # inn_lst = []\n # for serial, val in log.items():\n # if not (serial in main_keys):\n # inn_lst.append(serial + '\\\\' + val)\n\n # lst.append('||'.join(inn_lst))\n\n lst.append(json.dumps(log))\n\n # print(lst)\n\n if update:\n self.update_record(lst, log['logID'])\n\n else:\n self.new_return(lst)\n\n @staticmethod\n def load_log_query(Db, query):\n\n x = list(Db.query_data(query, 100))\n\n local_log = list()\n for row in x:\n row = list(row)\n try:\n ret = OrdDic()\n\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': row[4]})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n\n ret.update(json.loads(row[7]))\n\n # LEGACY\n # for serial_data in row[7:]:\n # try:\n # for serial in serial_data.split('||'):\n # ser, val = serial.split('\\\\')\n # val = \"\" + val\n # ret.update({ser: str(val)})\n # except AttributeError:\n # print('The Db structure is incorrect')\n local_log.append(ret)\n\n except TypeError:\n print(\"none value in db\")\n return local_log\n\n @staticmethod\n def load_log(Db, log_id=None):\n \"\"\" loads the log file \"\"\"\n # try:\n # r = reader(open(\"resources/static/logs.csv\", \"r\"))\n # except FileNotFoundError:\n # w = open(\"resources/static/logs.csv\", 'w')\n # w.close()\n # r = reader(open(\"resources/static/logs.csv\", \"r\"))\n\n if log_id:\n row = Db.find_index(log_id).fetchone()\n local_log = list()\n ret = None\n 
try:\n ret = OrdDic()\n\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n\n ret.update(json.loads(row[7]))\n\n # LEGACY\n # for serial_data in row[7:]:\n # try:\n # for serial in serial_data.split('||'):\n # ser, val = serial.split('\\\\')\n # val = \"\" + val\n # ret.update({ser: str(val)})\n # except AttributeError:\n # print('The Db structure is incorrect')\n\n except TypeError:\n pass # This is handled upon return (it returns None type)\n\n return ret\n\n else:\n try:\n x = list(Db.read_return(entries=100))\n except TypeError:\n x = \"\"\n\n local_log = list()\n for row in x:\n row = list(row)\n\n try:\n ret = OrdDic()\n\n ret.update({'logID': row[0]})\n ret.update({'name': row[1]})\n ret.update({'sender': row[2]})\n ret.update({'receiver': row[3]})\n ret.update({'time': fix_time(row[4])})\n ret.update({'duty': row[5]})\n ret.update({'net': row[6]})\n\n ret.update(json.loads(row[7]))\n\n # LEGACY\n # for serial_data in row[7:]:\n # try:\n # for serial in serial_data.split('||'):\n # ser, val = serial.split('\\\\')\n # val = \"\" + val\n # ret.update({ser: str(val)})\n # except AttributeError:\n # print('The Db structure is incorrect')\n local_log.append(ret)\n\n except TypeError:\n print(\"none value in db\")\n\n return local_log\n\n @staticmethod\n def delete_log_byID(Db, id):\n Db.delete_return_byID(id)\n\ndef fix_time(dtg):\n if len(str(dtg)) == 6:\n return str(dtg)\n else:\n return str(f'0{dtg}')\n\n\nclass SaveTimer(object):\n def __init__(self, interval, function, *args, **kwargs):\n self._timer = None\n self.interval = interval\n self.function = function\n self.args = args\n self.kwargs = kwargs\n self.is_running = False\n self.start()\n\n def _run(self):\n self.is_running = False\n self.start()\n self.function(*self.args, **self.kwargs)\n\n def start(self):\n if not self.is_running:\n self._timer = Timer(self.interval, self._run)\n self._timer.start()\n self.is_running = True\n\n def stop(self):\n self._timer.cancel()\n self.is_running = False\n\ndef copytree(src, dst, symlinks=False, ignore=None):\n for item in os.listdir(src):\n s = os.path.join(src, item)\n d = os.path.join(dst, item)\n if os.path.isdir(s):\n shutil.copytree(s, d, symlinks, ignore)\n else:\n shutil.copy2(s, d)\n\ndef backup():\n files = glob(\"/Volumes/*\")\n\n rem = [\"/Volumes/student\", \"/Volumes/com.apple.TimeMachine.localsnapshots\", \"/Volumes/Macintosh HD\", \"Blah\"]\n\n for path in rem:\n try:\n files.remove(path)\n except ValueError:\n pass\n\n\n usb = None\n for path in files:\n if \"CP\" in path:\n usb = os.path.join(path, \"Backup\")\n break\n else:\n usb = None\n print(\"No Backup USB found\")\n\n if usb:\n # save to usb\n print(\"Saving...\", end=\" \")\n save(os.path.join(usb, \"files\"), \"resources/files\")\n save(os.path.join(usb, \"db\"), \"resources/static/db\")\n print(\"Saved\")\n\ndef save(dest, src):\n if not os.path.exists(dest):\n os.makedirs(dest)\n copytree(src, dest)\n\nif __name__ == '__main__':\n pass\n\n # File.pre_merge()\n\n # settings = File.read_settings()\n # File.save_settings(settings)\n # File.read_locations()\n # File.read_callsigns()\n # File.save_dic()\n # File.read_dic()\n\n # x = file()\n # x.save()\n",
"step-ids": [
31,
37,
39,
44,
50
]
}
|
[
31,
37,
39,
44,
50
] |
n = int(input('Informe um numero: '))
print('----------------')
print('{} x {:2} = {:2}'.format(n, 1, 1 * n))
print('{} x {:2} = {:2}'.format(n, 2, 2 * n))
print('{} x {:2} = {:2}'.format(n, 3, 3 * n))
print('{} x {:2} = {:2}'.format(n, 4, 4 * n))
print('{} x {:2} = {:2}'.format(n, 5, 5 * n))
print('{} x {:2} = {:2}'.format(n, 6, 6 * n))
print('{} x {:2} = {:2}'.format(n, 7, 7 * n))
print('{} x {:2} = {:2}'.format(n, 8, 8 * n))
print('{} x {:2} = {:2}'.format(n, 9, 9 * n))
print('{} x {:2} = {:2}'.format(n, 10, 10 * n))
print('----------------')
|
normal
|
{
"blob_id": "9e814e3f1162e248c5d778c2df9960b199854a27",
"index": 9306,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('----------------')\nprint('{} x {:2} = {:2}'.format(n, 1, 1 * n))\nprint('{} x {:2} = {:2}'.format(n, 2, 2 * n))\nprint('{} x {:2} = {:2}'.format(n, 3, 3 * n))\nprint('{} x {:2} = {:2}'.format(n, 4, 4 * n))\nprint('{} x {:2} = {:2}'.format(n, 5, 5 * n))\nprint('{} x {:2} = {:2}'.format(n, 6, 6 * n))\nprint('{} x {:2} = {:2}'.format(n, 7, 7 * n))\nprint('{} x {:2} = {:2}'.format(n, 8, 8 * n))\nprint('{} x {:2} = {:2}'.format(n, 9, 9 * n))\nprint('{} x {:2} = {:2}'.format(n, 10, 10 * n))\nprint('----------------')\n",
"step-3": "n = int(input('Informe um numero: '))\nprint('----------------')\nprint('{} x {:2} = {:2}'.format(n, 1, 1 * n))\nprint('{} x {:2} = {:2}'.format(n, 2, 2 * n))\nprint('{} x {:2} = {:2}'.format(n, 3, 3 * n))\nprint('{} x {:2} = {:2}'.format(n, 4, 4 * n))\nprint('{} x {:2} = {:2}'.format(n, 5, 5 * n))\nprint('{} x {:2} = {:2}'.format(n, 6, 6 * n))\nprint('{} x {:2} = {:2}'.format(n, 7, 7 * n))\nprint('{} x {:2} = {:2}'.format(n, 8, 8 * n))\nprint('{} x {:2} = {:2}'.format(n, 9, 9 * n))\nprint('{} x {:2} = {:2}'.format(n, 10, 10 * n))\nprint('----------------')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import re
import requests
def download_image(url: str) -> bool:
img_tag_regex = r"""<img.*?src="(.*?)"[^\>]+>"""
response = requests.get(url)
if response.status_code != 200:
return False
text = response.text
image_links = re.findall(img_tag_regex, text)
for link in image_links:
resp = requests.get(link)
with open(link.replace("https://", "").replace("http://", ""), "wb") as file:
file.write(resp.content)
return True
|
normal
|
{
"blob_id": "268c36f6fb99383ea02b7ee406189ffb467d246c",
"index": 6554,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef download_image(url: str) ->bool:\n img_tag_regex = '<img.*?src=\"(.*?)\"[^\\\\>]+>'\n response = requests.get(url)\n if response.status_code != 200:\n return False\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace('https://', '').replace('http://', ''), 'wb'\n ) as file:\n file.write(resp.content)\n return True\n",
"step-3": "import re\nimport requests\n\n\ndef download_image(url: str) ->bool:\n img_tag_regex = '<img.*?src=\"(.*?)\"[^\\\\>]+>'\n response = requests.get(url)\n if response.status_code != 200:\n return False\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace('https://', '').replace('http://', ''), 'wb'\n ) as file:\n file.write(resp.content)\n return True\n",
"step-4": "import re\n\nimport requests\n\n\ndef download_image(url: str) -> bool:\n img_tag_regex = r\"\"\"<img.*?src=\"(.*?)\"[^\\>]+>\"\"\"\n\n response = requests.get(url)\n if response.status_code != 200:\n return False\n\n text = response.text\n image_links = re.findall(img_tag_regex, text)\n\n for link in image_links:\n resp = requests.get(link)\n with open(link.replace(\"https://\", \"\").replace(\"http://\", \"\"), \"wb\") as file:\n file.write(resp.content)\n\n return True\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_outputs():
cmd = ['swaymsg', '-t', 'get_outputs']
proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()
proc_json = json.loads(proc_result)
return [output['name'] for output in proc_json]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_wallpaper(FOLDER):
files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]
return random.choice(files)
def get_outputs():
cmd = ['swaymsg', '-t', 'get_outputs']
proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()
proc_json = json.loads(proc_result)
return [output['name'] for output in proc_json]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_wallpaper(FOLDER):
files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]
return random.choice(files)
def get_outputs():
cmd = ['swaymsg', '-t', 'get_outputs']
proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()
proc_json = json.loads(proc_result)
return [output['name'] for output in proc_json]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Set a random wallpaper per output')
parser.add_argument('--folder', metavar='D', type=str, nargs=1, help=
'folder to search for images')
parser.add_argument('--delay', metavar='S', type=int, help=
'How many seconds to wait before changing the wallpaper')
args = parser.parse_args()
while True:
try:
outputs = get_outputs()
cmd = 'swaybg'
for output in outputs:
image = get_wallpaper(args.folder[0])
cmd = f'{cmd} --image={image} --output={output}'
print(cmd)
proc = subprocess.Popen(cmd, shell=True)
time.sleep(args.delay)
proc.kill()
except Exception as e:
print(e, file=sys.stderr)
finally:
if proc:
proc.kill()
<|reserved_special_token_1|>
import sys
import json
import time
import random
import pathlib
import argparse
import subprocess
proc = None
def get_wallpaper(FOLDER):
files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]
return random.choice(files)
def get_outputs():
cmd = ['swaymsg', '-t', 'get_outputs']
proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()
proc_json = json.loads(proc_result)
return [output['name'] for output in proc_json]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Set a random wallpaper per output')
parser.add_argument('--folder', metavar='D', type=str, nargs=1, help=
'folder to search for images')
parser.add_argument('--delay', metavar='S', type=int, help=
'How many seconds to wait before changing the wallpaper')
args = parser.parse_args()
while True:
try:
outputs = get_outputs()
cmd = 'swaybg'
for output in outputs:
image = get_wallpaper(args.folder[0])
cmd = f'{cmd} --image={image} --output={output}'
print(cmd)
proc = subprocess.Popen(cmd, shell=True)
time.sleep(args.delay)
proc.kill()
except Exception as e:
print(e, file=sys.stderr)
finally:
if proc:
proc.kill()
<|reserved_special_token_1|>
#!/usr/bin/env python
import sys
import json
import time
import random
import pathlib
import argparse
import subprocess
proc = None
def get_wallpaper(FOLDER):
files = [path for path in pathlib.Path(FOLDER).iterdir()
if path.is_file()]
return random.choice(files)
def get_outputs():
cmd = ['swaymsg', '-t', 'get_outputs']
proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()
proc_json = json.loads(proc_result)
return [output['name'] for output in proc_json]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Set a random wallpaper per output')
parser.add_argument('--folder', metavar='D', type=str, nargs=1,
help='folder to search for images')
parser.add_argument('--delay', metavar='S', type=int,
help='How many seconds to wait before changing the wallpaper')
args = parser.parse_args()
while True:
try:
outputs = get_outputs()
cmd = 'swaybg'
for output in outputs:
image = get_wallpaper(args.folder[0])
cmd = f'{cmd} --image={image} --output={output}'
print(cmd)
proc = subprocess.Popen(cmd, shell=True)
time.sleep(args.delay)
proc.kill()
except Exception as e:
print(e, file=sys.stderr)
finally:
if proc:
proc.kill()
|
flexible
|
{
"blob_id": "46b1991bba83968466390d306a4415b362b6a868",
"index": 3140,
"step-1": "<mask token>\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Set a random wallpaper per output')\n parser.add_argument('--folder', metavar='D', type=str, nargs=1, help=\n 'folder to search for images')\n parser.add_argument('--delay', metavar='S', type=int, help=\n 'How many seconds to wait before changing the wallpaper')\n args = parser.parse_args()\n while True:\n try:\n outputs = get_outputs()\n cmd = 'swaybg'\n for output in outputs:\n image = get_wallpaper(args.folder[0])\n cmd = f'{cmd} --image={image} --output={output}'\n print(cmd)\n proc = subprocess.Popen(cmd, shell=True)\n time.sleep(args.delay)\n proc.kill()\n except Exception as e:\n print(e, file=sys.stderr)\n finally:\n if proc:\n proc.kill()\n",
"step-4": "import sys\nimport json\nimport time\nimport random\nimport pathlib\nimport argparse\nimport subprocess\nproc = None\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir() if path.is_file()]\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n return [output['name'] for output in proc_json]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Set a random wallpaper per output')\n parser.add_argument('--folder', metavar='D', type=str, nargs=1, help=\n 'folder to search for images')\n parser.add_argument('--delay', metavar='S', type=int, help=\n 'How many seconds to wait before changing the wallpaper')\n args = parser.parse_args()\n while True:\n try:\n outputs = get_outputs()\n cmd = 'swaybg'\n for output in outputs:\n image = get_wallpaper(args.folder[0])\n cmd = f'{cmd} --image={image} --output={output}'\n print(cmd)\n proc = subprocess.Popen(cmd, shell=True)\n time.sleep(args.delay)\n proc.kill()\n except Exception as e:\n print(e, file=sys.stderr)\n finally:\n if proc:\n proc.kill()\n",
"step-5": "#!/usr/bin/env python\n\nimport sys\nimport json\nimport time\nimport random\nimport pathlib\nimport argparse\nimport subprocess\n\nproc = None\n\n\ndef get_wallpaper(FOLDER):\n files = [path for path in pathlib.Path(FOLDER).iterdir()\n if path.is_file()]\n\n return random.choice(files)\n\n\ndef get_outputs():\n cmd = ['swaymsg', '-t', 'get_outputs']\n proc_result = subprocess.run(cmd, capture_output=True).stdout.decode()\n proc_json = json.loads(proc_result)\n\n return [output['name'] for output in proc_json]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Set a random wallpaper per output')\n parser.add_argument('--folder', metavar='D', type=str, nargs=1,\n help='folder to search for images')\n\n parser.add_argument('--delay', metavar='S', type=int,\n help='How many seconds to wait before changing the wallpaper')\n\n args = parser.parse_args()\n\n while True:\n try:\n outputs = get_outputs()\n\n cmd = 'swaybg'\n\n for output in outputs:\n image = get_wallpaper(args.folder[0])\n cmd = f'{cmd} --image={image} --output={output}'\n\n print(cmd)\n proc = subprocess.Popen(cmd, shell=True)\n\n time.sleep(args.delay)\n proc.kill()\n except Exception as e:\n print(e, file=sys.stderr)\n\n finally:\n if proc:\n proc.kill()\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
from eval_lib.classification_results import analyze_one_classification_result
from eval_lib.classification_results import ClassificationBatches
from eval_lib.cloud_client import CompetitionDatastoreClient
from eval_lib.cloud_client import CompetitionStorageClient
from eval_lib.dataset_helper import DatasetMetadata
from eval_lib.dataset_helper import download_dataset
from eval_lib.dataset_helper import enforce_epsilon_and_compute_hash
from eval_lib.image_batches import AversarialBatches
from eval_lib.image_batches import DatasetBatches
from eval_lib.submissions import CompetitionSubmissions
from eval_lib.work_data import AttackWorkPieces
from eval_lib.work_data import DefenseWorkPieces
|
normal
|
{
"blob_id": "64935ae910d5f330722b637dcc5794e7e07ab52d",
"index": 8375,
"step-1": "<mask token>\n",
"step-2": "from eval_lib.classification_results import analyze_one_classification_result\nfrom eval_lib.classification_results import ClassificationBatches\nfrom eval_lib.cloud_client import CompetitionDatastoreClient\nfrom eval_lib.cloud_client import CompetitionStorageClient\nfrom eval_lib.dataset_helper import DatasetMetadata\nfrom eval_lib.dataset_helper import download_dataset\nfrom eval_lib.dataset_helper import enforce_epsilon_and_compute_hash\nfrom eval_lib.image_batches import AversarialBatches\nfrom eval_lib.image_batches import DatasetBatches\nfrom eval_lib.submissions import CompetitionSubmissions\nfrom eval_lib.work_data import AttackWorkPieces\nfrom eval_lib.work_data import DefenseWorkPieces\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/env python
#--coding: utf8--
import time
if __name__ == '__main__':
date = time.strftime('%m-%d')
if date == '03-08':
print '女神节'
elif date == '02-14':
print '情人节'
else:
print '发红包'
print '这是一个测试题'
|
normal
|
{
"blob_id": "23375760c0943ca177b7009031d9d17a91165c5c",
"index": 230,
"step-1": "#!/usr/bin/env python\n#--coding: utf8--\nimport time\n\nif __name__ == '__main__':\n date = time.strftime('%m-%d')\n if date == '03-08':\n print '女神节'\n elif date == '02-14':\n print '情人节'\n else:\n print '发红包'\n print '这是一个测试题'",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import graphics
import ply.lex as lex
import ply.yacc as yacc
import jstokens
import jsgrammar
def interpret(trees): # Hello, friend
for tree in trees: # Hello,
# ("word-element","Hello")
nodetype=tree[0] # "word-element"
if nodetype == "word-element":
graphics.word(tree[1])
elif nodetype == "tag-element":
# <b>Strong text</b>
tagname = tree[1] # b
tagargs = tree[2] # []
subtrees = tree[3] # ...Strong Text!...
closetagname = tree[4] # b
if(tagname!=closetagname):
graphics.warning("mismatched tag")
else:
graphics.begintag(tagname,tagargs)
interpret(subtrees)
graphics.endtag()
elif nodetype == "javascript-element":
jstext = tree[1]; # "document.write(55);"
jslexer = lex.lex(module=jstokens)
jsparser = yacc.yacc(module=jsgrammar)
jstree = jsparser.parse(jstext,lexer=jslexer)
# jstree is a parse tree for JavaScript
result = jsinterp.interpret(jstree)
graphics.word(result)
|
normal
|
{
"blob_id": "f3b3bee494493263f8b00827e6f3ff3a1dcd8c37",
"index": 6144,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef interpret(trees):\n for tree in trees:\n nodetype = tree[0]\n if nodetype == 'word-element':\n graphics.word(tree[1])\n elif nodetype == 'tag-element':\n tagname = tree[1]\n tagargs = tree[2]\n subtrees = tree[3]\n closetagname = tree[4]\n if tagname != closetagname:\n graphics.warning('mismatched tag')\n else:\n graphics.begintag(tagname, tagargs)\n interpret(subtrees)\n graphics.endtag()\n elif nodetype == 'javascript-element':\n jstext = tree[1]\n jslexer = lex.lex(module=jstokens)\n jsparser = yacc.yacc(module=jsgrammar)\n jstree = jsparser.parse(jstext, lexer=jslexer)\n result = jsinterp.interpret(jstree)\n graphics.word(result)\n",
"step-3": "import graphics\nimport ply.lex as lex\nimport ply.yacc as yacc\nimport jstokens\nimport jsgrammar\n\n\ndef interpret(trees):\n for tree in trees:\n nodetype = tree[0]\n if nodetype == 'word-element':\n graphics.word(tree[1])\n elif nodetype == 'tag-element':\n tagname = tree[1]\n tagargs = tree[2]\n subtrees = tree[3]\n closetagname = tree[4]\n if tagname != closetagname:\n graphics.warning('mismatched tag')\n else:\n graphics.begintag(tagname, tagargs)\n interpret(subtrees)\n graphics.endtag()\n elif nodetype == 'javascript-element':\n jstext = tree[1]\n jslexer = lex.lex(module=jstokens)\n jsparser = yacc.yacc(module=jsgrammar)\n jstree = jsparser.parse(jstext, lexer=jslexer)\n result = jsinterp.interpret(jstree)\n graphics.word(result)\n",
"step-4": "import graphics\nimport ply.lex as lex\nimport ply.yacc as yacc\nimport jstokens\nimport jsgrammar\n\ndef interpret(trees): # Hello, friend\n for tree in trees: # Hello,\n # (\"word-element\",\"Hello\")\n nodetype=tree[0] # \"word-element\"\n if nodetype == \"word-element\":\n graphics.word(tree[1]) \n elif nodetype == \"tag-element\":\n # <b>Strong text</b>\n tagname = tree[1] # b\n tagargs = tree[2] # []\n subtrees = tree[3] # ...Strong Text!...\n closetagname = tree[4] # b\n if(tagname!=closetagname):\n graphics.warning(\"mismatched tag\")\n else:\n graphics.begintag(tagname,tagargs)\n interpret(subtrees)\n graphics.endtag()\n elif nodetype == \"javascript-element\":\n jstext = tree[1]; # \"document.write(55);\"\n jslexer = lex.lex(module=jstokens)\n jsparser = yacc.yacc(module=jsgrammar)\n jstree = jsparser.parse(jstext,lexer=jslexer)\n # jstree is a parse tree for JavaScript\n result = jsinterp.interpret(jstree)\n graphics.word(result)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Employee', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('first_name', models.CharField(blank=
True, max_length=30, null=True)), ('last_name', models.CharField(
blank=True, max_length=30, null=True)), ('gender', models.CharField
(blank=True, max_length=10, null=True)), ('email', models.
EmailField(blank=True, max_length=255, null=True)), ('phone_number',
models.CharField(blank=True, max_length=20, null=True)), ('address',
models.TextField(blank=True, max_length=255, null=True)), ('city',
models.CharField(blank=True, max_length=50, null=True)), ('state',
models.CharField(blank=True, max_length=50, null=True)), (
'post_code', models.CharField(blank=True, max_length=10, null=True)
), ('comment', models.TextField(blank=True, max_length=255, null=
True))]), migrations.CreateModel(name='Item', fields=[('id', models
.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('barcode', models.CharField(blank=True,
max_length=100, null=True)), ('item_name', models.CharField(blank=
True, max_length=100, null=True)), ('catagory', models.CharField(
blank=True, max_length=100, null=True)), ('wholesale_price', models
.FloatField(blank=True, null=True)), ('retail_price', models.
FloatField(blank=True, null=True)), ('tax', models.FloatField(blank
=True, null=True)), ('quantity_stock', models.IntegerField(blank=
True, null=True)), ('receiving_quantity', models.IntegerField(blank
=True, null=True)), ('description', models.TextField(blank=True,
max_length=1000, null=True)), ('image', models.ImageField(blank=
True, default='no-img.jpg', null=True, upload_to='item/')), (
'item_has_serial_number', models.BooleanField(default=False)), (
'reorder_level', models.CharField(blank=True, max_length=10, null=
True))]), migrations.CreateModel(name='Customer', fields=[(
'employee_ptr', models.OneToOneField(auto_created=True, on_delete=
django.db.models.deletion.CASCADE, parent_link=True, primary_key=
True, serialize=False, to='account.Employee'))], bases=(
'account.employee',)), migrations.CreateModel(name='Supplier',
fields=[('employee_ptr', models.OneToOneField(auto_created=True,
on_delete=django.db.models.deletion.CASCADE, parent_link=True,
primary_key=True, serialize=False, to='account.Employee')), (
'company_name', models.CharField(blank=True, max_length=100, null=
True))], bases=('account.employee',)), migrations.AddField(
model_name='item', name='supplier', field=models.ForeignKey(blank=
True, null=True, on_delete=django.db.models.deletion.CASCADE, to=
'account.Supplier'))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Employee', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('first_name', models.CharField(blank=
True, max_length=30, null=True)), ('last_name', models.CharField(
blank=True, max_length=30, null=True)), ('gender', models.CharField
(blank=True, max_length=10, null=True)), ('email', models.
EmailField(blank=True, max_length=255, null=True)), ('phone_number',
models.CharField(blank=True, max_length=20, null=True)), ('address',
models.TextField(blank=True, max_length=255, null=True)), ('city',
models.CharField(blank=True, max_length=50, null=True)), ('state',
models.CharField(blank=True, max_length=50, null=True)), (
'post_code', models.CharField(blank=True, max_length=10, null=True)
), ('comment', models.TextField(blank=True, max_length=255, null=
True))]), migrations.CreateModel(name='Item', fields=[('id', models
.AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('barcode', models.CharField(blank=True,
max_length=100, null=True)), ('item_name', models.CharField(blank=
True, max_length=100, null=True)), ('catagory', models.CharField(
blank=True, max_length=100, null=True)), ('wholesale_price', models
.FloatField(blank=True, null=True)), ('retail_price', models.
FloatField(blank=True, null=True)), ('tax', models.FloatField(blank
=True, null=True)), ('quantity_stock', models.IntegerField(blank=
True, null=True)), ('receiving_quantity', models.IntegerField(blank
=True, null=True)), ('description', models.TextField(blank=True,
max_length=1000, null=True)), ('image', models.ImageField(blank=
True, default='no-img.jpg', null=True, upload_to='item/')), (
'item_has_serial_number', models.BooleanField(default=False)), (
'reorder_level', models.CharField(blank=True, max_length=10, null=
True))]), migrations.CreateModel(name='Customer', fields=[(
'employee_ptr', models.OneToOneField(auto_created=True, on_delete=
django.db.models.deletion.CASCADE, parent_link=True, primary_key=
True, serialize=False, to='account.Employee'))], bases=(
'account.employee',)), migrations.CreateModel(name='Supplier',
fields=[('employee_ptr', models.OneToOneField(auto_created=True,
on_delete=django.db.models.deletion.CASCADE, parent_link=True,
primary_key=True, serialize=False, to='account.Employee')), (
'company_name', models.CharField(blank=True, max_length=100, null=
True))], bases=('account.employee',)), migrations.AddField(
model_name='item', name='supplier', field=models.ForeignKey(blank=
True, null=True, on_delete=django.db.models.deletion.CASCADE, to=
'account.Supplier'))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-24 11:30
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Employee',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=30, null=True)),
('last_name', models.CharField(blank=True, max_length=30, null=True)),
('gender', models.CharField(blank=True, max_length=10, null=True)),
('email', models.EmailField(blank=True, max_length=255, null=True)),
('phone_number', models.CharField(blank=True, max_length=20, null=True)),
('address', models.TextField(blank=True, max_length=255, null=True)),
('city', models.CharField(blank=True, max_length=50, null=True)),
('state', models.CharField(blank=True, max_length=50, null=True)),
('post_code', models.CharField(blank=True, max_length=10, null=True)),
('comment', models.TextField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('barcode', models.CharField(blank=True, max_length=100, null=True)),
('item_name', models.CharField(blank=True, max_length=100, null=True)),
('catagory', models.CharField(blank=True, max_length=100, null=True)),
('wholesale_price', models.FloatField(blank=True, null=True)),
('retail_price', models.FloatField(blank=True, null=True)),
('tax', models.FloatField(blank=True, null=True)),
('quantity_stock', models.IntegerField(blank=True, null=True)),
('receiving_quantity', models.IntegerField(blank=True, null=True)),
('description', models.TextField(blank=True, max_length=1000, null=True)),
('image', models.ImageField(blank=True, default='no-img.jpg', null=True, upload_to='item/')),
('item_has_serial_number', models.BooleanField(default=False)),
('reorder_level', models.CharField(blank=True, max_length=10, null=True)),
],
),
migrations.CreateModel(
name='Customer',
fields=[
('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),
],
bases=('account.employee',),
),
migrations.CreateModel(
name='Supplier',
fields=[
('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),
('company_name', models.CharField(blank=True, max_length=100, null=True)),
],
bases=('account.employee',),
),
migrations.AddField(
model_name='item',
name='supplier',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Supplier'),
),
]
|
flexible
|
{
"blob_id": "56157aaf3f98abc58572b45111becb91cb93f328",
"index": 2926,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Employee', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('first_name', models.CharField(blank=\n True, max_length=30, null=True)), ('last_name', models.CharField(\n blank=True, max_length=30, null=True)), ('gender', models.CharField\n (blank=True, max_length=10, null=True)), ('email', models.\n EmailField(blank=True, max_length=255, null=True)), ('phone_number',\n models.CharField(blank=True, max_length=20, null=True)), ('address',\n models.TextField(blank=True, max_length=255, null=True)), ('city',\n models.CharField(blank=True, max_length=50, null=True)), ('state',\n models.CharField(blank=True, max_length=50, null=True)), (\n 'post_code', models.CharField(blank=True, max_length=10, null=True)\n ), ('comment', models.TextField(blank=True, max_length=255, null=\n True))]), migrations.CreateModel(name='Item', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('barcode', models.CharField(blank=True,\n max_length=100, null=True)), ('item_name', models.CharField(blank=\n True, max_length=100, null=True)), ('catagory', models.CharField(\n blank=True, max_length=100, null=True)), ('wholesale_price', models\n .FloatField(blank=True, null=True)), ('retail_price', models.\n FloatField(blank=True, null=True)), ('tax', models.FloatField(blank\n =True, null=True)), ('quantity_stock', models.IntegerField(blank=\n True, null=True)), ('receiving_quantity', models.IntegerField(blank\n =True, null=True)), ('description', models.TextField(blank=True,\n max_length=1000, null=True)), ('image', models.ImageField(blank=\n True, default='no-img.jpg', null=True, upload_to='item/')), (\n 'item_has_serial_number', models.BooleanField(default=False)), (\n 'reorder_level', models.CharField(blank=True, max_length=10, null=\n True))]), migrations.CreateModel(name='Customer', fields=[(\n 'employee_ptr', models.OneToOneField(auto_created=True, on_delete=\n django.db.models.deletion.CASCADE, parent_link=True, primary_key=\n True, serialize=False, to='account.Employee'))], bases=(\n 'account.employee',)), migrations.CreateModel(name='Supplier',\n fields=[('employee_ptr', models.OneToOneField(auto_created=True,\n on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='account.Employee')), (\n 'company_name', models.CharField(blank=True, max_length=100, null=\n True))], bases=('account.employee',)), migrations.AddField(\n model_name='item', name='supplier', field=models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE, to=\n 'account.Supplier'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Employee', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('first_name', models.CharField(blank=\n True, max_length=30, null=True)), ('last_name', models.CharField(\n blank=True, max_length=30, null=True)), ('gender', models.CharField\n (blank=True, max_length=10, null=True)), ('email', models.\n EmailField(blank=True, max_length=255, null=True)), ('phone_number',\n models.CharField(blank=True, max_length=20, null=True)), ('address',\n models.TextField(blank=True, max_length=255, null=True)), ('city',\n models.CharField(blank=True, max_length=50, null=True)), ('state',\n models.CharField(blank=True, max_length=50, null=True)), (\n 'post_code', models.CharField(blank=True, max_length=10, null=True)\n ), ('comment', models.TextField(blank=True, max_length=255, null=\n True))]), migrations.CreateModel(name='Item', fields=[('id', models\n .AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('barcode', models.CharField(blank=True,\n max_length=100, null=True)), ('item_name', models.CharField(blank=\n True, max_length=100, null=True)), ('catagory', models.CharField(\n blank=True, max_length=100, null=True)), ('wholesale_price', models\n .FloatField(blank=True, null=True)), ('retail_price', models.\n FloatField(blank=True, null=True)), ('tax', models.FloatField(blank\n =True, null=True)), ('quantity_stock', models.IntegerField(blank=\n True, null=True)), ('receiving_quantity', models.IntegerField(blank\n =True, null=True)), ('description', models.TextField(blank=True,\n max_length=1000, null=True)), ('image', models.ImageField(blank=\n True, default='no-img.jpg', null=True, upload_to='item/')), (\n 'item_has_serial_number', models.BooleanField(default=False)), (\n 'reorder_level', models.CharField(blank=True, max_length=10, null=\n True))]), migrations.CreateModel(name='Customer', fields=[(\n 'employee_ptr', models.OneToOneField(auto_created=True, on_delete=\n django.db.models.deletion.CASCADE, parent_link=True, primary_key=\n True, serialize=False, to='account.Employee'))], bases=(\n 'account.employee',)), migrations.CreateModel(name='Supplier',\n fields=[('employee_ptr', models.OneToOneField(auto_created=True,\n on_delete=django.db.models.deletion.CASCADE, parent_link=True,\n primary_key=True, serialize=False, to='account.Employee')), (\n 'company_name', models.CharField(blank=True, max_length=100, null=\n True))], bases=('account.employee',)), migrations.AddField(\n model_name='item', name='supplier', field=models.ForeignKey(blank=\n True, null=True, on_delete=django.db.models.deletion.CASCADE, to=\n 'account.Supplier'))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-02-24 11:30\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Employee',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('first_name', models.CharField(blank=True, max_length=30, null=True)),\n ('last_name', models.CharField(blank=True, max_length=30, null=True)),\n ('gender', models.CharField(blank=True, max_length=10, null=True)),\n ('email', models.EmailField(blank=True, max_length=255, null=True)),\n ('phone_number', models.CharField(blank=True, max_length=20, null=True)),\n ('address', models.TextField(blank=True, max_length=255, null=True)),\n ('city', models.CharField(blank=True, max_length=50, null=True)),\n ('state', models.CharField(blank=True, max_length=50, null=True)),\n ('post_code', models.CharField(blank=True, max_length=10, null=True)),\n ('comment', models.TextField(blank=True, max_length=255, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('barcode', models.CharField(blank=True, max_length=100, null=True)),\n ('item_name', models.CharField(blank=True, max_length=100, null=True)),\n ('catagory', models.CharField(blank=True, max_length=100, null=True)),\n ('wholesale_price', models.FloatField(blank=True, null=True)),\n ('retail_price', models.FloatField(blank=True, null=True)),\n ('tax', models.FloatField(blank=True, null=True)),\n ('quantity_stock', models.IntegerField(blank=True, null=True)),\n ('receiving_quantity', models.IntegerField(blank=True, null=True)),\n ('description', models.TextField(blank=True, max_length=1000, null=True)),\n ('image', models.ImageField(blank=True, default='no-img.jpg', null=True, upload_to='item/')),\n ('item_has_serial_number', models.BooleanField(default=False)),\n ('reorder_level', models.CharField(blank=True, max_length=10, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),\n ],\n bases=('account.employee',),\n ),\n migrations.CreateModel(\n name='Supplier',\n fields=[\n ('employee_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='account.Employee')),\n ('company_name', models.CharField(blank=True, max_length=100, null=True)),\n ],\n bases=('account.employee',),\n ),\n migrations.AddField(\n model_name='item',\n name='supplier',\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Supplier'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/python
# The program was written by YU Ho Yung and LEUNG Kin Tung in group 24.
# The program is written with stubs for phase 3
#
import pymongo
from scrapy.selector import Selector
import pandas as pd
# import numpy as np
import datetime
import re
import pprint
import subprocess
import scrapy
from pymongo import MongoClient
import model4
import model5
def isNonNegativeFloat(floatNumberInput):
    try:
        if(float(floatNumberInput) >= 0):
            return True
    except ValueError as err:
        print('Your input is not a non-negative number: ', err)
    return False

def isCourseCode(courseCode):
    try:
        # the pattern is deliberately permissive: up to 4 letters, up to 4 digits and an optional trailing letter
        matchObj = re.match(r'[A-Z]?[A-Z]?[A-Z]?[A-Z]?\d?\d?\d?\d?([A-Z]?)', str(courseCode))
        if(matchObj is not None):
            return True
    except ValueError as err:
        print('Your course code is not valid: ', err)
    return False

def isIntNumber(intNumberInput):
    try:
        int(intNumberInput)
        return True
    except ValueError as err:
        print('Your number is not an integer: ', err)
    return False
'''
5.1 Collection Dropping and Empty Collection Creating
(This feature will be used for the demonstration purpose.
The detailed implementation of this feature will be completed by you in Phase 3.)
Input:
none
Output:
Display a message “Collection dropping and empty collection creating are successful”
(after the collection(s) is(are) removed and the new empty collection(s) is(are) created).
'''
# to handle the function "Collection Dropping and Empty Collection Creating"
def collectionDroppingAndEmptyCollectionCreatingHandler(db):
    # to execute the collection dropping and empty collection creating routine
collectionDroppingAndEmptyCollectionCreating(db)
def collectionDroppingAndEmptyCollectionCreating(db):
#A function to drop and empty all collections
# Dropping Collection
try:
print("Dropping Collection...")
print(" Dropping collection \'course\'...")
db.course.drop()
except pymongo.errors.ConnectionFailure as error:
print("Collection Dropping Failed! Error Message: \"{}\"".format(error))
print("Collection dropping and empty collection creating are successful")
'''
5.2 Data Crawling
(The detailed implementation of this feature will be completed by you in Phase 3.)
Input:
a URL (e.g., “http://course.cse.ust.hk/comp4332/index.html”) or
a special keyword (i.e., “default”)
Output:
If the input is “default”, display a message “Data Crawling is successful and all data are inserted into the database”
(after all data are crawled from the default URL given in the project webpage and are inserted into the database).
Otherwise, do the same prompt operation but the URL used is the URL typed in the input.
'''
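# Illustrative calls for the crawling feature described above (not from the spec):
#   dataCrawling("default")  -> crawls the default URL (http://comp4332.com/realistic below)
#   dataCrawling("http://course.cse.ust.hk/comp4332/index.html")  -> crawls the given URL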
def dataCrawlingHandler():
url = input("Please input the URL for Data Crawling: ")
dataCrawling(url)
def dataCrawling(url):
print("Data Crawling started")
if(str(url).lower() == "default" or url == ""):
#implement the crawling function from default website
# Inserting Documents
url = 'http://comp4332.com/realistic'
#testing
# url = 'http://comp4332.com/trial'
with open('url.txt', 'w') as f: #Please double check if this works
f.write(str(url))
strCommand = "scrapy crawl ustWebpageSpider" #referred to ustWebpageSpider.py
subprocess.run(strCommand, shell=True)
print("Data Crawling is successful and all data from default are inserted into the database")
else:
with open('url.txt', 'w') as f: #Please double check if this works
f.write(str(url))
strCommand = "scrapy crawl ustWebpageSpider" #referred to ustWebpageSpider.py
subprocess.run(strCommand, shell=True)
# implement the crawling function from the given url
print("Data Crawling is successful and all data are inserted into the database from: ", str(url))
#The detailed implementation of this feature will be completed in Phase 3
'''
5.3 Course Search
(The detailed implementation of this feature will be completed by you in Phase 4.
But, the design of this feature will be completed in Phase 2.)
We have the following two operations for a course search.
1. Course Search by Keyword
2. Course Search by Waiting List Size
Note: Although there are some missing data in this project (which may require “prediction”),
in this part/feature, you just perform these operations for a course search only based on the data given to you.
There is no need to perform any “prediction” in this part.
'''
#def courseSearch():
#This is just an abstraction here, not even a stub.
#The detailed implementation of this feature will be completed in Phase 4.
'''
5.3.1 Course Search by Keyword
Input:
a text (k) where this text is called “keyword(s)”
Output:
A list of courses which course titles, course description or course remarks match the given text k.
In the output, for each course, please show “Course Code”, “Course Title”, “No. of Units/Credits”,
a list of sections of the course each with “Section”, “Date & Time”, “Quota”, “Enrol”, “Avail” and “Wait”.
Please sort the list of courses in ascending order of “Course Code”.
(Within a single course, please sort in ascending order of “Sections”)
We say that a phrase P matches text k if at least one of the words in phrase P is equal to one of words in k.
For example, if P = “Big Data Mining and Management” and k = “Mining”, then P matches k.
If P = “Big Data Mining and Management” and k = “Risk Mining”, then P matches k too.
If P = “Big Data Mining and Management” and k = “Mining Management”, then P matches k.
'''
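# A minimal, MongoDB-free sketch of the matching rule above (helper name is
# illustrative; assumes whitespace tokenisation and exact word equality — the real
# query below uses a "|"-joined regex instead):
def phraseMatchesKeyword(phrase, keyword):
    # True when at least one word of the phrase equals one word of the keyword
    return bool(set(phrase.split()) & set(keyword.split()))
# e.g. phraseMatchesKeyword("Big Data Mining and Management", "Risk Mining")       -> True
#      phraseMatchesKeyword("Big Data Mining and Management", "Mining Management") -> True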
# "lectureSection" is optional
# "satisfied" is optional
def outputCourseDetails(courseCode, lectureSection = 0, satisfied = ""):
    #: search for the given course code in the database
    #TODO: print the course details for the matched course code
if(satisfied == ""):
cols = ["Course Code", "Course Title", "No. of Units/Credits", "Section", "Date & Time", "Quota", "Enrol", "Avail","Wait"]
df = pd.DataFrame({"Course Code" : ["COMP1001", "COMP1021"],"Course Title": ["Exploring Multimedia and Internet Computing","Introduction to Computer Science"],"No. of Units/Credits":[3,3], "Section":["L1,L2","L1"], "Date & Time":["Th 03:00PM - 04:50PM","TuTh 04:30PM - 05:20PM"], "Quota":[67,80], "Enrol":[19,75], "Avail":[48,5],"Wait":[0,26]},columns=cols)
print(df)
#return df.values.tolist()
return df
else:
cols = ["Course Code", "Course Title", "No. of Units/Credits", "Section", "Date & Time", "Quota", "Enrol", "Avail","Wait", "Satisfied"]
df = pd.DataFrame({"Course Code" : ["COMP1001", "COMP1021"],"Course Title": ["Exploring Multimedia and Internet Computing","Introduction to Computer Science"],"No. of Units/Credits":[3,3], "Section":["L1,L2","L1"], "Date & Time":["Th 03:00PM - 04:50PM","TuTh 04:30PM - 05:20PM"], "Quota":[67,80], "Enrol":[19,75], "Avail":[48,5],"Wait":[0,26], "Satisfied":["Yes","No"]},columns=cols)
print(df)
#return df.values.tolist()
return df
def courseSearchByKeywordHandler(db):
keyword = input("Please input a keyword for searching : ")
courseSearchByKeyword(db,keyword)
def courseSearchByKeyword(db,keyword):
keyword = keyword.split()
keyword = "|".join(keyword)
#TODO:Use the keyword to find a list of courses.
#The keyword will be searched in course titles, course description or course remarks.
try:
print("Querying Documents...")
print(" Finding a list of course which title....")
# listOfCourse = db.course.find()
listOfCourse = db.course.aggregate([
{
"$match": {
"$or": [
{"title": {'$regex': keyword}},
{"description": {'$regex': keyword}},
{"colistWith": {'$regex': keyword}}
]
}
},
{
"$unwind": "$sections"
},
{
"$sort": {"sections.recordTime": 1 }
},
{
"$group":{
"_id":{"sid":"$sections.sectionId", "code": "$code"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"sections":{"$last": "$sections"},
"description":{"$last":"$description"}
}
},
{
"$sort": {"sections.sectionId": 1 }
},
{
"$group":{
"_id":{ "code": "$code"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"sections":{
"$push": {
"sectionId":"$sections.sectionId",
"dateAndTime":"$sections.offerings.dateAndTime",
"quota":"$sections.quota",
"enrol":"$sections.enrol",
"avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } ,
"wait":"$sections.wait"
}
},
"description":{"$last":"$description"}
}
},
{
"$project":{"_id":0,"code":1,"title":1,"credits":1,"sections":1,"description":1}
}
])
recordNo = 0
for oneCourse in listOfCourse:
recordNo = recordNo + 1
print("Record {:d}:".format(recordNo))
print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"]))
for oneSection in oneCourse["sections"]:
print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"])))
print("description: {:s}".format(oneCourse["description"]))
# pprint.pprint(oneCourse)
except pymongo.errors.ConnectionFailure as error:
print("Document Querying Failed! Error Message: \"{}\"".format(error))
# courseCode = "COMP1001"
# return outputCourseDetails(courseCode)
'''
5.3.2 Course Search by Waiting List Size
Input:
A non-negative real number f
Starting Time Slot (start_ts)
Ending Time Slot (end_ts)
Output:
A list of courses each of which has a lecture section (e.g., “L1” and “L2”) in a time slot,
    say match_ts, between start_ts (inclusively) and end_ts (inclusively)
where
the number of students in the waiting list of this lecture section is
greater than or equal to
f multiplied by the number of students enrolled in this lecture section in that timeslot.
In the output, for each “distinct” course, please show
“Course Code”,
    “Course Title”,
“No. of Units/Credits”,
“Matched Time Slot”,
    a list of sections (including both lecture sections and non-lecture sections)
of the course each with “Section”,
“Date & Time”,
“Quota”,
“Enrol”,
“Avail”,
“Wait” and
“Satisfied”
(all shown with the content/values recorded in the time slot match_ts).
Note that “Matched Time Slot” is a new attribute in this query and it is equal to match_ts.
If a single course satisfies the required condition in multiple time slots
(no matter which lecture section of this course satisfies the required condition),
we just show the latest time slot among all these time slots in which this course satisfies the required condition.
Thus, each course should appear at most once in the output.
Note that “Satisfied” is another new attribute in this query.
It is equal to “Yes”
if the number of students in the waiting list of this section
is greater than or equal to
f multiplied by the number ofstudents enrolled in this section in that time slot.
It is equal to “No” otherwise.
Attribute “Satisfied” is not needed to be considered in Phase 2.
Please sort the list of courses in ascending order of “Course Code”.
(Within a single course, please sort in ascending order of “Sections”)
'''
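# A small illustration of the "Satisfied" rule described above (helper name and
# numbers are illustrative only; the actual check is the $gte/$multiply expression
# inside the aggregation pipeline below):
def isSectionSatisfied(wait, enrol, f):
    # "Yes" when the waiting list size is at least f times the enrolment in that time slot
    return wait >= f * enrol
# e.g. isSectionSatisfied(25, 40, 0.5) -> True  (25 >= 0.5 * 40 = 20)
#      isSectionSatisfied(15, 40, 0.5) -> False (15 <  20)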
def courseSearchByWaitingListSizeHandler(db):
correctInput = False
while(correctInput == False):
f = input("Please input a non-negative real number: ")
correctInput = isNonNegativeFloat(f)
    start_ts = input("Please input a Starting Time Slot (YYYY-MM-DD HH:MM): ")
    end_ts = input("Please input an Ending Time Slot (YYYY-MM-DD HH:MM): ")
courseSearchByWaitingListSize(db, f, start_ts, end_ts)
# A non-negative real number f
# Starting Time Slot (start_ts)
# Ending Time Slot (end_ts)
def courseSearchByWaitingListSize(db, f, start_ts, end_ts):
#TODO: A function uses the Waiting List Size number to find a list of courses and output a list of course code with lecture section
# satisfied = "Yes"
# f = 0.01
try:
print("Querying Documents...")
listOfCourseWithWaitingListSize = db.course.aggregate([
{ "$unwind": "$sections" },
{"$match":
{"$and":[
{"sections.recordTime": {"$gte": datetime.datetime.strptime(start_ts, "%Y-%m-%d %H:%M")}},
{"sections.recordTime": {"$lte": datetime.datetime.strptime(end_ts, "%Y-%m-%d %H:%M")}}
]
}
},
{ "$project":
{"code": 1,
"title": 1,
"credits": 1,
"sections":1,
"satisfied":{"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]},
"lecSatisfied":{
"$cond":[{
"$and":[
{
"$gte":["$sections.wait",{"$multiply":["$sections.enrol",float(f)]}]
},
{
"$eq":[{"$substr": ["$sections.sectionId",0,1]},"L"]
}
]
},1,0]
}
},
},
{
"$sort": {"sections.sectionId": 1 }
},
{
"$group":{
"_id":{ "code": "$code", "recordTime":"$sections.recordTime"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$sections.recordTime"},
"sections":{
"$push": {
"sectionId":"$sections.sectionId",
"dateAndTime":"$sections.offerings.dateAndTime",
"quota":"$sections.quota",
"enrol":"$sections.enrol",
"avail": { "$subtract": [ "$sections.quota", "$sections.enrol"] } ,
"wait":"$sections.wait",
"satisfied":"$satisfied",
}
},
"lecSatisfiedCount":{"$sum":"$lecSatisfied"}
}
},
{ "$match": {"lecSatisfiedCount": {"$gt":0}}
},
{
"$sort": {"recordTime": 1 }
},
{
"$group":{
"_id":{ "code": "$code"},
"code": {"$last": "$code"},
"title": {"$last": "$title"},
"credits": {"$last": "$credits"},
"recordTime":{"$last": "$recordTime"},
"sections":{"$last": "$sections"}
}
},
{
"$project":{
"_id":0,
"code": 1,
"title":1,
"credits": 1,
"recordTime":1,
"sections":1
}
}
]
)
recordNo = 0
for oneCourse in listOfCourseWithWaitingListSize:
recordNo = recordNo + 1
print("Record {:d}:".format(recordNo))
pprint.pprint(oneCourse)
# print("code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}".format(oneCourse["code"], oneCourse["title"], oneCourse["credits"],oneCourse["sections"][0]["quota"],oneCourse["sections"][0]["enrol"],oneCourse["sections"][0]["avail"],oneCourse["sections"][0]["wait"]))
# for oneSection in oneCourse["sections"]:
# print("sections: {:s}, Date & Time: {:s}".format(oneSection["sectionId"],' '.join(oneSection["dateAndTime"])))
# print("description: {:s}".format(oneCourse["description"]))
#pprint(" Record {:d}: (sid={:s}, sname={:s}, byear={:d})".format(recordNo, oneStudent["sid"], oneStudent["sname"], oneStudent["byear"]))
#print("Record {:d}: (course={:s})".format(recordNo, oneCourse))
except pymongo.errors.ConnectionFailure as error:
print("Document Querying Failed! Error Message: \"{}\"".format(error))
#return outputCourseDetails(courseCode, lectureSection, satisfied)
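# Example invocation (hypothetical parameter values):
#   courseSearchByWaitingListSize(db, 0.01, "2018-01-25 09:00", "2018-01-26 23:00")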
'''
5.4 Waiting List Size Prediction
(The detailed implementation of this feature will be completed by you in Phase 6.
But, the design of this feature will be completed in Phase 5.)
Input:
Course Code (cc)
Lecture number (ln) (e.g., the input should be “1” denoting “L1”)
Time Slot (ts)
Output:
“N1,N2,N3,N4,N5”
where Ni denotes the number of students in the waiting list of the lecture number (ln) (if any) of the course cc
in the given time slot (ts) predicted by Model i for each i in [1, 5]
(Note that these 5 numbers are integers.)
Note: Since we know that training a model may take some time, in general, “cc” could be any course code.
However, in our demonstration, we will test with the course code “cc” starting from “COMP1942”, “COMP42”, “COMP43” or “RMBI” only
(i.e., (1) the COMP course (“COMP1942”), (2) any COMP course with starting course digits equal to “42” or “43” and (3) any RMBI course).
Thus, you just need to train your model with the data from the course with the course code “cc” starting from
these course code prefixes described above before our demonstration.
When we use this feature in our demonstration, you just need to load the trained model and perform the prediction of this feature based on the trained model.
If there is no lecture section of the course (cc) specified in the input or if the lecture number entered (ln) is not offered for the course (cc) specified in the input,
we just need to show “There is no lecture section and thus there is no prediction result.”
Although it is possible that we could use 5 models for one course and we could also use 5 “different” models for another course,
for the sake of simplicity, please use the same 5 models for any course needed.
Of course, even if the 5 models are the same (with the same set of “input” parameter values) for any two courses,
we know that each of the 5 models could be trained with different enrollment data from different courses, resulting in different “model” parameter values
(e.g., the weight values between neurons in a neural network which should be found from the data).
'''
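# Illustrative call (hypothetical values, once the models are trained):
#   waitingListSizePrediction("COMP4332", "1", "2018-02-01 11:30")
# might return (12, 13, 12, 14, 13), i.e. one integer per model.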
def waitingListSizePredictionHandler(db):
correctInput = False
while(correctInput == False):
cc = input("Please input a Course Code: ")
cc = str(cc).upper()
correctInput= isCourseCode(cc)
correctInput = False
while(correctInput == False):
ln = input("Please input a Lecture number: ")
correctInput = isIntNumber(ln)
ts = input("Please input a Time Slot : ")
N1, N2, N3, N4, N5 = waitingListSizePrediction(cc,ln,ts)
print("The prediction result \"N1, N2, N3, N4, N5\" from 5 Models:")
	print(N1, N2, N3, N4, N5, sep=",")
'''
5.5 Waiting List Size Training
(This feature will be used for your “own” training purpose before we have the real feature from Section 5.4.
The detailed implementation of this feature will be completed by you in Phase 6.
But, the design of this feature will be completed in Phase 5.)
Input:
none
Output:
Display a message “Waiting list size training is successful” (after the training process on the waiting list size finishes).
'''
def waitingListSizeTraining():
#TODO: The function for the training process on the waiting list size
print("Waiting list size training is successful")
	# (once implemented, return or persist the trained model/data here)
# Course Code (cc)
# Lecture number (ln) (e.g., the input should be “1” denoting “L1”)
# Time Slot (ts)
def waitingListSizePrediction(courseCode,lectureNumber, timeslot):
# courseData = waitingListSizeTraining()
	#TODO: use 5 trained models to compute the prediction
# timeslot = "2018-01-26 22:30"
earliestTime = datetime.datetime.strptime("2018-01-25T09:00Z", "%Y-%m-%dT%H:%MZ").timestamp()
timeslot = int((datetime.datetime.strptime(timeslot, "%Y-%m-%d %H:%M").timestamp() - earliestTime)/1800)
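	# Illustrative conversion (using the 2018-01-25 09:00 base above): "2018-01-26 22:30"
	# is 37.5 hours later, i.e. 135000 s / 1800 s = slot 75.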
lectureNumber= int(str(lectureNumber)[-1])
courseCode = str(courseCode).upper()
# print(courseData)
N1 = model4.predictionHandler(courseCode,lectureNumber, timeslot)+1
N2 = model4.predictionHandler(courseCode,lectureNumber, timeslot)-1
N3 = model4.predictionHandler(courseCode,lectureNumber, timeslot)
N4 = model4.predictionHandler(courseCode,lectureNumber, timeslot)+2
N5 = model5.predictionHandler(courseCode,lectureNumber, timeslot)
	# Five models predict five different (int) results
#N1, N2, N3, N4, N5 = 11,12,11,14,13
return int(N1), int(N2), int(N3), int(N4), int(N5)
# to display the system interface with stubs
def main():
try:
# Making a DB connection
print("Making a MongoDB connection...")
client = MongoClient("mongodb://localhost:27017")
		# Getting a Database named "hkust"
		print("Getting a database named \"hkust\"")
		db = client["hkust"]
# here, we need to implement for the flow
# display the menu
choice = "0"
while (choice != "6"):
print("")
print(" Main Menu")
print("=========================")
print("1. Collection Dropping and Empty Collection Creating")
print("2. Data Crawling")
print("3. Course Search by Keyword")
print("4. Course Search by Waiting List Size")
print("5. Waiting List Size Prediction")
print("6. Exit")
print("")
# allow the user to choose one of the functions in the menu
choice = input("Please input your choice (1-6): ")
print("")
# check the input and call the correspondence function
if (choice == "1"):
collectionDroppingAndEmptyCollectionCreatingHandler(db)
elif (choice == "2"):
dataCrawlingHandler()
elif (choice == "3"):
courseSearchByKeywordHandler(db)
elif (choice == "4"):
courseSearchByWaitingListSizeHandler(db)
elif (choice == "5"):
waitingListSizePredictionHandler(db)
elif (choice == "6"):
print("")
else:
print("Invalid Input!")
		# close the DB connection only after leaving the menu loop
		client.close()
except pymongo.errors.ConnectionFailure as error:
print("DB Connection Failed! Error Message: \"{}\"".format(error))
main()
|
normal
|
{
"blob_id": "b3a07107ef64bb50f4768954cbb579d8e66bd003",
"index": 6612,
"step-1": "<mask token>\n\n\ndef isCourseCode(corseCode):\n try:\n matchObj = re.match('[A-Z]?[A-Z]?[A-Z]?[A-Z]?\\\\d?\\\\d?\\\\d?\\\\d?([A-Z]?)',\n str(corseCode))\n if matchObj != None:\n return True\n except ValueError as err:\n print('Your courseCode is not correct: ', err)\n return False\n\n\n<mask token>\n\n\ndef dataCrawling(url):\n print('Data Crawling started')\n if str(url).lower() == 'default' or url == '':\n url = 'http://comp4332.com/realistic'\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data from default are inserted into the database'\n )\n else:\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data are inserted into the database from: '\n , str(url))\n\n\n<mask token>\n\n\ndef courseSearchByKeywordHandler(db):\n keyword = input('Please input a keyword for searching : ')\n courseSearchByKeyword(db, keyword)\n\n\n<mask token>\n\n\ndef courseSearchByWaitingListSize(db, f, start_ts, end_ts):\n try:\n print('Querying Documents...')\n listOfCourseWithWaitingListSize = db.course.aggregate([{'$unwind':\n '$sections'}, {'$match': {'$and': [{'sections.recordTime': {\n '$gte': datetime.datetime.strptime(start_ts, '%Y-%m-%d %H:%M')}\n }, {'sections.recordTime': {'$lte': datetime.datetime.strptime(\n end_ts, '%Y-%m-%d %H:%M')}}]}}, {'$project': {'code': 1,\n 'title': 1, 'credits': 1, 'sections': 1, 'satisfied': {'$gte':\n ['$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}\n ]}, 'lecSatisfied': {'$cond': [{'$and': [{'$gte': [\n '$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}]\n }, {'$eq': [{'$substr': ['$sections.sectionId', 0, 1]}, 'L']}]},\n 1, 0]}}}, {'$sort': {'sections.sectionId': 1}}, {'$group': {\n '_id': {'code': '$code', 'recordTime': '$sections.recordTime'},\n 'code': {'$last': '$code'}, 'title': {'$last': '$title'},\n 'credits': {'$last': '$credits'}, 'recordTime': {'$last':\n '$sections.recordTime'}, 'sections': {'$push': {'sectionId':\n '$sections.sectionId', 'dateAndTime':\n '$sections.offerings.dateAndTime', 'quota': '$sections.quota',\n 'enrol': '$sections.enrol', 'avail': {'$subtract': [\n '$sections.quota', '$sections.enrol']}, 'wait':\n '$sections.wait', 'satisfied': '$satisfied'}},\n 'lecSatisfiedCount': {'$sum': '$lecSatisfied'}}}, {'$match': {\n 'lecSatisfiedCount': {'$gt': 0}}}, {'$sort': {'recordTime': 1}},\n {'$group': {'_id': {'code': '$code'}, 'code': {'$last': '$code'\n }, 'title': {'$last': '$title'}, 'credits': {'$last':\n '$credits'}, 'recordTime': {'$last': '$recordTime'}, 'sections':\n {'$last': '$sections'}}}, {'$project': {'_id': 0, 'code': 1,\n 'title': 1, 'credits': 1, 'recordTime': 1, 'sections': 1}}])\n recordNo = 0\n for oneCourse in listOfCourseWithWaitingListSize:\n recordNo = recordNo + 1\n print('Record {:d}:'.format(recordNo))\n pprint.pprint(oneCourse)\n except pymongo.errors.ConnectionFailure as error:\n print('Document Querying Failed! 
Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n\n\ndef waitingListSizePredictionHandler(db):\n correctInput = False\n while correctInput == False:\n cc = input('Please input a Course Code: ')\n cc = str(cc).upper()\n correctInput = isCourseCode(cc)\n correctInput = False\n while correctInput == False:\n ln = input('Please input a Lecture number: ')\n correctInput = isIntNumber(ln)\n ts = input('Please input a Time Slot : ')\n N1, N2, N3, N4, N5 = waitingListSizePrediction(cc, ln, ts)\n print('The prediction result \"N1, N2, N3, N4, N5\" from 5 Models:')\n print(N1, ',', N2, ',', N3, ',', N4, ',', N5)\n\n\n<mask token>\n\n\ndef main():\n try:\n print('Making a MongoDB connection...')\n client = MongoClient('mongodb://localhost:27017')\n print('Getting a database named \"course\"')\n db = client['hkust']\n choice = '0'\n while choice != '6':\n print('')\n print(' Main Menu')\n print('=========================')\n print('1. Collection Dropping and Empty Collection Creating')\n print('2. Data Crawling')\n print('3. Course Search by Keyword')\n print('4. Course Search by Waiting List Size')\n print('5. Waiting List Size Prediction')\n print('6. Exit')\n print('')\n choice = input('Please input your choice (1-6): ')\n print('')\n if choice == '1':\n collectionDroppingAndEmptyCollectionCreatingHandler(db)\n elif choice == '2':\n dataCrawlingHandler()\n elif choice == '3':\n courseSearchByKeywordHandler(db)\n elif choice == '4':\n courseSearchByWaitingListSizeHandler(db)\n elif choice == '5':\n waitingListSizePredictionHandler(db)\n elif choice == '6':\n print('')\n else:\n print('Invalid Input!')\n client.close()\n except pymongo.errors.ConnectionFailure as error:\n print('DB Connection Failed! Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef isNonNegativeFloat(floatNumberInput):\n try:\n if float(floatNumberInput) >= 0:\n return True\n except ValueError as err:\n print('Your input is not a non-negative number: ', err)\n return False\n\n\ndef isCourseCode(corseCode):\n try:\n matchObj = re.match('[A-Z]?[A-Z]?[A-Z]?[A-Z]?\\\\d?\\\\d?\\\\d?\\\\d?([A-Z]?)',\n str(corseCode))\n if matchObj != None:\n return True\n except ValueError as err:\n print('Your courseCode is not correct: ', err)\n return False\n\n\n<mask token>\n\n\ndef collectionDroppingAndEmptyCollectionCreatingHandler(db):\n collectionDroppingAndEmptyCollectionCreating(db)\n\n\ndef collectionDroppingAndEmptyCollectionCreating(db):\n try:\n print('Dropping Collection...')\n print(\" Dropping collection 'course'...\")\n db.course.drop()\n except pymongo.errors.ConnectionFailure as error:\n print('Collection Dropping Failed! Error Message: \"{}\"'.format(error))\n print('Collection dropping and empty collection creating are successful')\n\n\n<mask token>\n\n\ndef dataCrawling(url):\n print('Data Crawling started')\n if str(url).lower() == 'default' or url == '':\n url = 'http://comp4332.com/realistic'\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data from default are inserted into the database'\n )\n else:\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data are inserted into the database from: '\n , str(url))\n\n\n<mask token>\n\n\ndef outputCourseDetails(courseCode, lectureSection=0, satisfied=''):\n if satisfied == '':\n cols = ['Course Code', 'Course Title', 'No. of Units/Credits',\n 'Section', 'Date & Time', 'Quota', 'Enrol', 'Avail', 'Wait']\n df = pd.DataFrame({'Course Code': ['COMP1001', 'COMP1021'],\n 'Course Title': ['Exploring Multimedia and Internet Computing',\n 'Introduction to Computer Science'], 'No. of Units/Credits': [3,\n 3], 'Section': ['L1,L2', 'L1'], 'Date & Time': [\n 'Th 03:00PM - 04:50PM', 'TuTh 04:30PM - 05:20PM'], 'Quota': [67,\n 80], 'Enrol': [19, 75], 'Avail': [48, 5], 'Wait': [0, 26]},\n columns=cols)\n print(df)\n return df\n else:\n cols = ['Course Code', 'Course Title', 'No. of Units/Credits',\n 'Section', 'Date & Time', 'Quota', 'Enrol', 'Avail', 'Wait',\n 'Satisfied']\n df = pd.DataFrame({'Course Code': ['COMP1001', 'COMP1021'],\n 'Course Title': ['Exploring Multimedia and Internet Computing',\n 'Introduction to Computer Science'], 'No. 
of Units/Credits': [3,\n 3], 'Section': ['L1,L2', 'L1'], 'Date & Time': [\n 'Th 03:00PM - 04:50PM', 'TuTh 04:30PM - 05:20PM'], 'Quota': [67,\n 80], 'Enrol': [19, 75], 'Avail': [48, 5], 'Wait': [0, 26],\n 'Satisfied': ['Yes', 'No']}, columns=cols)\n print(df)\n return df\n\n\ndef courseSearchByKeywordHandler(db):\n keyword = input('Please input a keyword for searching : ')\n courseSearchByKeyword(db, keyword)\n\n\ndef courseSearchByKeyword(db, keyword):\n keyword = keyword.split()\n keyword = '|'.join(keyword)\n try:\n print('Querying Documents...')\n print(' Finding a list of course which title....')\n listOfCourse = db.course.aggregate([{'$match': {'$or': [{'title': {\n '$regex': keyword}}, {'description': {'$regex': keyword}}, {\n 'colistWith': {'$regex': keyword}}]}}, {'$unwind': '$sections'},\n {'$sort': {'sections.recordTime': 1}}, {'$group': {'_id': {\n 'sid': '$sections.sectionId', 'code': '$code'}, 'code': {\n '$last': '$code'}, 'title': {'$last': '$title'}, 'credits': {\n '$last': '$credits'}, 'sections': {'$last': '$sections'},\n 'description': {'$last': '$description'}}}, {'$sort': {\n 'sections.sectionId': 1}}, {'$group': {'_id': {'code': '$code'},\n 'code': {'$last': '$code'}, 'title': {'$last': '$title'},\n 'credits': {'$last': '$credits'}, 'sections': {'$push': {\n 'sectionId': '$sections.sectionId', 'dateAndTime':\n '$sections.offerings.dateAndTime', 'quota': '$sections.quota',\n 'enrol': '$sections.enrol', 'avail': {'$subtract': [\n '$sections.quota', '$sections.enrol']}, 'wait':\n '$sections.wait'}}, 'description': {'$last': '$description'}}},\n {'$project': {'_id': 0, 'code': 1, 'title': 1, 'credits': 1,\n 'sections': 1, 'description': 1}}])\n recordNo = 0\n for oneCourse in listOfCourse:\n recordNo = recordNo + 1\n print('Record {:d}:'.format(recordNo))\n print(\n \"\"\"code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}\"\"\"\n .format(oneCourse['code'], oneCourse['title'], oneCourse[\n 'credits'], oneCourse['sections'][0]['quota'], oneCourse[\n 'sections'][0]['enrol'], oneCourse['sections'][0]['avail'],\n oneCourse['sections'][0]['wait']))\n for oneSection in oneCourse['sections']:\n print('sections: {:s}, Date & Time: {:s}'.format(oneSection\n ['sectionId'], ' '.join(oneSection['dateAndTime'])))\n print('description: {:s}'.format(oneCourse['description']))\n except pymongo.errors.ConnectionFailure as error:\n print('Document Querying Failed! 
Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n\n\ndef courseSearchByWaitingListSizeHandler(db):\n correctInput = False\n while correctInput == False:\n f = input('Please input a non-negative real number: ')\n correctInput = isNonNegativeFloat(f)\n start_ts = input('Please input a Starting Time Slot: ')\n end_ts = input('Please input a Ending Time Slot : ')\n courseSearchByWaitingListSize(db, f, start_ts, end_ts)\n\n\ndef courseSearchByWaitingListSize(db, f, start_ts, end_ts):\n try:\n print('Querying Documents...')\n listOfCourseWithWaitingListSize = db.course.aggregate([{'$unwind':\n '$sections'}, {'$match': {'$and': [{'sections.recordTime': {\n '$gte': datetime.datetime.strptime(start_ts, '%Y-%m-%d %H:%M')}\n }, {'sections.recordTime': {'$lte': datetime.datetime.strptime(\n end_ts, '%Y-%m-%d %H:%M')}}]}}, {'$project': {'code': 1,\n 'title': 1, 'credits': 1, 'sections': 1, 'satisfied': {'$gte':\n ['$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}\n ]}, 'lecSatisfied': {'$cond': [{'$and': [{'$gte': [\n '$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}]\n }, {'$eq': [{'$substr': ['$sections.sectionId', 0, 1]}, 'L']}]},\n 1, 0]}}}, {'$sort': {'sections.sectionId': 1}}, {'$group': {\n '_id': {'code': '$code', 'recordTime': '$sections.recordTime'},\n 'code': {'$last': '$code'}, 'title': {'$last': '$title'},\n 'credits': {'$last': '$credits'}, 'recordTime': {'$last':\n '$sections.recordTime'}, 'sections': {'$push': {'sectionId':\n '$sections.sectionId', 'dateAndTime':\n '$sections.offerings.dateAndTime', 'quota': '$sections.quota',\n 'enrol': '$sections.enrol', 'avail': {'$subtract': [\n '$sections.quota', '$sections.enrol']}, 'wait':\n '$sections.wait', 'satisfied': '$satisfied'}},\n 'lecSatisfiedCount': {'$sum': '$lecSatisfied'}}}, {'$match': {\n 'lecSatisfiedCount': {'$gt': 0}}}, {'$sort': {'recordTime': 1}},\n {'$group': {'_id': {'code': '$code'}, 'code': {'$last': '$code'\n }, 'title': {'$last': '$title'}, 'credits': {'$last':\n '$credits'}, 'recordTime': {'$last': '$recordTime'}, 'sections':\n {'$last': '$sections'}}}, {'$project': {'_id': 0, 'code': 1,\n 'title': 1, 'credits': 1, 'recordTime': 1, 'sections': 1}}])\n recordNo = 0\n for oneCourse in listOfCourseWithWaitingListSize:\n recordNo = recordNo + 1\n print('Record {:d}:'.format(recordNo))\n pprint.pprint(oneCourse)\n except pymongo.errors.ConnectionFailure as error:\n print('Document Querying Failed! 
Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n\n\ndef waitingListSizePredictionHandler(db):\n correctInput = False\n while correctInput == False:\n cc = input('Please input a Course Code: ')\n cc = str(cc).upper()\n correctInput = isCourseCode(cc)\n correctInput = False\n while correctInput == False:\n ln = input('Please input a Lecture number: ')\n correctInput = isIntNumber(ln)\n ts = input('Please input a Time Slot : ')\n N1, N2, N3, N4, N5 = waitingListSizePrediction(cc, ln, ts)\n print('The prediction result \"N1, N2, N3, N4, N5\" from 5 Models:')\n print(N1, ',', N2, ',', N3, ',', N4, ',', N5)\n\n\n<mask token>\n\n\ndef waitingListSizePrediction(courseCode, lectureNumber, timeslot):\n earliestTime = datetime.datetime.strptime('2018-01-25T09:00Z',\n '%Y-%m-%dT%H:%MZ').timestamp()\n timeslot = int((datetime.datetime.strptime(timeslot, '%Y-%m-%d %H:%M').\n timestamp() - earliestTime) / 1800)\n lectureNumber = int(str(lectureNumber)[-1])\n courseCode = str(courseCode).upper()\n N1 = model4.predictionHandler(courseCode, lectureNumber, timeslot) + 1\n N2 = model4.predictionHandler(courseCode, lectureNumber, timeslot) - 1\n N3 = model4.predictionHandler(courseCode, lectureNumber, timeslot)\n N4 = model4.predictionHandler(courseCode, lectureNumber, timeslot) + 2\n N5 = model5.predictionHandler(courseCode, lectureNumber, timeslot)\n return int(N1), int(N2), int(N3), int(N4), int(N5)\n\n\ndef main():\n try:\n print('Making a MongoDB connection...')\n client = MongoClient('mongodb://localhost:27017')\n print('Getting a database named \"course\"')\n db = client['hkust']\n choice = '0'\n while choice != '6':\n print('')\n print(' Main Menu')\n print('=========================')\n print('1. Collection Dropping and Empty Collection Creating')\n print('2. Data Crawling')\n print('3. Course Search by Keyword')\n print('4. Course Search by Waiting List Size')\n print('5. Waiting List Size Prediction')\n print('6. Exit')\n print('')\n choice = input('Please input your choice (1-6): ')\n print('')\n if choice == '1':\n collectionDroppingAndEmptyCollectionCreatingHandler(db)\n elif choice == '2':\n dataCrawlingHandler()\n elif choice == '3':\n courseSearchByKeywordHandler(db)\n elif choice == '4':\n courseSearchByWaitingListSizeHandler(db)\n elif choice == '5':\n waitingListSizePredictionHandler(db)\n elif choice == '6':\n print('')\n else:\n print('Invalid Input!')\n client.close()\n except pymongo.errors.ConnectionFailure as error:\n print('DB Connection Failed! Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef isNonNegativeFloat(floatNumberInput):\n try:\n if float(floatNumberInput) >= 0:\n return True\n except ValueError as err:\n print('Your input is not a non-negative number: ', err)\n return False\n\n\ndef isCourseCode(corseCode):\n try:\n matchObj = re.match('[A-Z]?[A-Z]?[A-Z]?[A-Z]?\\\\d?\\\\d?\\\\d?\\\\d?([A-Z]?)',\n str(corseCode))\n if matchObj != None:\n return True\n except ValueError as err:\n print('Your courseCode is not correct: ', err)\n return False\n\n\n<mask token>\n\n\ndef collectionDroppingAndEmptyCollectionCreatingHandler(db):\n collectionDroppingAndEmptyCollectionCreating(db)\n\n\ndef collectionDroppingAndEmptyCollectionCreating(db):\n try:\n print('Dropping Collection...')\n print(\" Dropping collection 'course'...\")\n db.course.drop()\n except pymongo.errors.ConnectionFailure as error:\n print('Collection Dropping Failed! Error Message: \"{}\"'.format(error))\n print('Collection dropping and empty collection creating are successful')\n\n\n<mask token>\n\n\ndef dataCrawlingHandler():\n url = input('Please input the URL for Data Crawling: ')\n dataCrawling(url)\n\n\ndef dataCrawling(url):\n print('Data Crawling started')\n if str(url).lower() == 'default' or url == '':\n url = 'http://comp4332.com/realistic'\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data from default are inserted into the database'\n )\n else:\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data are inserted into the database from: '\n , str(url))\n\n\n<mask token>\n\n\ndef outputCourseDetails(courseCode, lectureSection=0, satisfied=''):\n if satisfied == '':\n cols = ['Course Code', 'Course Title', 'No. of Units/Credits',\n 'Section', 'Date & Time', 'Quota', 'Enrol', 'Avail', 'Wait']\n df = pd.DataFrame({'Course Code': ['COMP1001', 'COMP1021'],\n 'Course Title': ['Exploring Multimedia and Internet Computing',\n 'Introduction to Computer Science'], 'No. of Units/Credits': [3,\n 3], 'Section': ['L1,L2', 'L1'], 'Date & Time': [\n 'Th 03:00PM - 04:50PM', 'TuTh 04:30PM - 05:20PM'], 'Quota': [67,\n 80], 'Enrol': [19, 75], 'Avail': [48, 5], 'Wait': [0, 26]},\n columns=cols)\n print(df)\n return df\n else:\n cols = ['Course Code', 'Course Title', 'No. of Units/Credits',\n 'Section', 'Date & Time', 'Quota', 'Enrol', 'Avail', 'Wait',\n 'Satisfied']\n df = pd.DataFrame({'Course Code': ['COMP1001', 'COMP1021'],\n 'Course Title': ['Exploring Multimedia and Internet Computing',\n 'Introduction to Computer Science'], 'No. 
of Units/Credits': [3,\n 3], 'Section': ['L1,L2', 'L1'], 'Date & Time': [\n 'Th 03:00PM - 04:50PM', 'TuTh 04:30PM - 05:20PM'], 'Quota': [67,\n 80], 'Enrol': [19, 75], 'Avail': [48, 5], 'Wait': [0, 26],\n 'Satisfied': ['Yes', 'No']}, columns=cols)\n print(df)\n return df\n\n\ndef courseSearchByKeywordHandler(db):\n keyword = input('Please input a keyword for searching : ')\n courseSearchByKeyword(db, keyword)\n\n\ndef courseSearchByKeyword(db, keyword):\n keyword = keyword.split()\n keyword = '|'.join(keyword)\n try:\n print('Querying Documents...')\n print(' Finding a list of course which title....')\n listOfCourse = db.course.aggregate([{'$match': {'$or': [{'title': {\n '$regex': keyword}}, {'description': {'$regex': keyword}}, {\n 'colistWith': {'$regex': keyword}}]}}, {'$unwind': '$sections'},\n {'$sort': {'sections.recordTime': 1}}, {'$group': {'_id': {\n 'sid': '$sections.sectionId', 'code': '$code'}, 'code': {\n '$last': '$code'}, 'title': {'$last': '$title'}, 'credits': {\n '$last': '$credits'}, 'sections': {'$last': '$sections'},\n 'description': {'$last': '$description'}}}, {'$sort': {\n 'sections.sectionId': 1}}, {'$group': {'_id': {'code': '$code'},\n 'code': {'$last': '$code'}, 'title': {'$last': '$title'},\n 'credits': {'$last': '$credits'}, 'sections': {'$push': {\n 'sectionId': '$sections.sectionId', 'dateAndTime':\n '$sections.offerings.dateAndTime', 'quota': '$sections.quota',\n 'enrol': '$sections.enrol', 'avail': {'$subtract': [\n '$sections.quota', '$sections.enrol']}, 'wait':\n '$sections.wait'}}, 'description': {'$last': '$description'}}},\n {'$project': {'_id': 0, 'code': 1, 'title': 1, 'credits': 1,\n 'sections': 1, 'description': 1}}])\n recordNo = 0\n for oneCourse in listOfCourse:\n recordNo = recordNo + 1\n print('Record {:d}:'.format(recordNo))\n print(\n \"\"\"code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}\"\"\"\n .format(oneCourse['code'], oneCourse['title'], oneCourse[\n 'credits'], oneCourse['sections'][0]['quota'], oneCourse[\n 'sections'][0]['enrol'], oneCourse['sections'][0]['avail'],\n oneCourse['sections'][0]['wait']))\n for oneSection in oneCourse['sections']:\n print('sections: {:s}, Date & Time: {:s}'.format(oneSection\n ['sectionId'], ' '.join(oneSection['dateAndTime'])))\n print('description: {:s}'.format(oneCourse['description']))\n except pymongo.errors.ConnectionFailure as error:\n print('Document Querying Failed! 
Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n\n\ndef courseSearchByWaitingListSizeHandler(db):\n correctInput = False\n while correctInput == False:\n f = input('Please input a non-negative real number: ')\n correctInput = isNonNegativeFloat(f)\n start_ts = input('Please input a Starting Time Slot: ')\n end_ts = input('Please input a Ending Time Slot : ')\n courseSearchByWaitingListSize(db, f, start_ts, end_ts)\n\n\ndef courseSearchByWaitingListSize(db, f, start_ts, end_ts):\n try:\n print('Querying Documents...')\n listOfCourseWithWaitingListSize = db.course.aggregate([{'$unwind':\n '$sections'}, {'$match': {'$and': [{'sections.recordTime': {\n '$gte': datetime.datetime.strptime(start_ts, '%Y-%m-%d %H:%M')}\n }, {'sections.recordTime': {'$lte': datetime.datetime.strptime(\n end_ts, '%Y-%m-%d %H:%M')}}]}}, {'$project': {'code': 1,\n 'title': 1, 'credits': 1, 'sections': 1, 'satisfied': {'$gte':\n ['$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}\n ]}, 'lecSatisfied': {'$cond': [{'$and': [{'$gte': [\n '$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}]\n }, {'$eq': [{'$substr': ['$sections.sectionId', 0, 1]}, 'L']}]},\n 1, 0]}}}, {'$sort': {'sections.sectionId': 1}}, {'$group': {\n '_id': {'code': '$code', 'recordTime': '$sections.recordTime'},\n 'code': {'$last': '$code'}, 'title': {'$last': '$title'},\n 'credits': {'$last': '$credits'}, 'recordTime': {'$last':\n '$sections.recordTime'}, 'sections': {'$push': {'sectionId':\n '$sections.sectionId', 'dateAndTime':\n '$sections.offerings.dateAndTime', 'quota': '$sections.quota',\n 'enrol': '$sections.enrol', 'avail': {'$subtract': [\n '$sections.quota', '$sections.enrol']}, 'wait':\n '$sections.wait', 'satisfied': '$satisfied'}},\n 'lecSatisfiedCount': {'$sum': '$lecSatisfied'}}}, {'$match': {\n 'lecSatisfiedCount': {'$gt': 0}}}, {'$sort': {'recordTime': 1}},\n {'$group': {'_id': {'code': '$code'}, 'code': {'$last': '$code'\n }, 'title': {'$last': '$title'}, 'credits': {'$last':\n '$credits'}, 'recordTime': {'$last': '$recordTime'}, 'sections':\n {'$last': '$sections'}}}, {'$project': {'_id': 0, 'code': 1,\n 'title': 1, 'credits': 1, 'recordTime': 1, 'sections': 1}}])\n recordNo = 0\n for oneCourse in listOfCourseWithWaitingListSize:\n recordNo = recordNo + 1\n print('Record {:d}:'.format(recordNo))\n pprint.pprint(oneCourse)\n except pymongo.errors.ConnectionFailure as error:\n print('Document Querying Failed! 
Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n\n\ndef waitingListSizePredictionHandler(db):\n correctInput = False\n while correctInput == False:\n cc = input('Please input a Course Code: ')\n cc = str(cc).upper()\n correctInput = isCourseCode(cc)\n correctInput = False\n while correctInput == False:\n ln = input('Please input a Lecture number: ')\n correctInput = isIntNumber(ln)\n ts = input('Please input a Time Slot : ')\n N1, N2, N3, N4, N5 = waitingListSizePrediction(cc, ln, ts)\n print('The prediction result \"N1, N2, N3, N4, N5\" from 5 Models:')\n print(N1, ',', N2, ',', N3, ',', N4, ',', N5)\n\n\n<mask token>\n\n\ndef waitingListSizePrediction(courseCode, lectureNumber, timeslot):\n earliestTime = datetime.datetime.strptime('2018-01-25T09:00Z',\n '%Y-%m-%dT%H:%MZ').timestamp()\n timeslot = int((datetime.datetime.strptime(timeslot, '%Y-%m-%d %H:%M').\n timestamp() - earliestTime) / 1800)\n lectureNumber = int(str(lectureNumber)[-1])\n courseCode = str(courseCode).upper()\n N1 = model4.predictionHandler(courseCode, lectureNumber, timeslot) + 1\n N2 = model4.predictionHandler(courseCode, lectureNumber, timeslot) - 1\n N3 = model4.predictionHandler(courseCode, lectureNumber, timeslot)\n N4 = model4.predictionHandler(courseCode, lectureNumber, timeslot) + 2\n N5 = model5.predictionHandler(courseCode, lectureNumber, timeslot)\n return int(N1), int(N2), int(N3), int(N4), int(N5)\n\n\ndef main():\n try:\n print('Making a MongoDB connection...')\n client = MongoClient('mongodb://localhost:27017')\n print('Getting a database named \"course\"')\n db = client['hkust']\n choice = '0'\n while choice != '6':\n print('')\n print(' Main Menu')\n print('=========================')\n print('1. Collection Dropping and Empty Collection Creating')\n print('2. Data Crawling')\n print('3. Course Search by Keyword')\n print('4. Course Search by Waiting List Size')\n print('5. Waiting List Size Prediction')\n print('6. Exit')\n print('')\n choice = input('Please input your choice (1-6): ')\n print('')\n if choice == '1':\n collectionDroppingAndEmptyCollectionCreatingHandler(db)\n elif choice == '2':\n dataCrawlingHandler()\n elif choice == '3':\n courseSearchByKeywordHandler(db)\n elif choice == '4':\n courseSearchByWaitingListSizeHandler(db)\n elif choice == '5':\n waitingListSizePredictionHandler(db)\n elif choice == '6':\n print('')\n else:\n print('Invalid Input!')\n client.close()\n except pymongo.errors.ConnectionFailure as error:\n print('DB Connection Failed! Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n",
"step-4": "import pymongo\nfrom scrapy.selector import Selector\nimport pandas as pd\nimport datetime\nimport re\nimport pprint\nimport subprocess\nimport scrapy\nfrom pymongo import MongoClient\nimport model4\nimport model5\n\n\ndef isNonNegativeFloat(floatNumberInput):\n try:\n if float(floatNumberInput) >= 0:\n return True\n except ValueError as err:\n print('Your input is not a non-negative number: ', err)\n return False\n\n\ndef isCourseCode(corseCode):\n try:\n matchObj = re.match('[A-Z]?[A-Z]?[A-Z]?[A-Z]?\\\\d?\\\\d?\\\\d?\\\\d?([A-Z]?)',\n str(corseCode))\n if matchObj != None:\n return True\n except ValueError as err:\n print('Your courseCode is not correct: ', err)\n return False\n\n\ndef isIntNumber(intNumberInput):\n try:\n int(intNumberInput)\n return True\n except ValueError as err:\n print('Your number is not an integer: ', err)\n return False\n\n\n<mask token>\n\n\ndef collectionDroppingAndEmptyCollectionCreatingHandler(db):\n collectionDroppingAndEmptyCollectionCreating(db)\n\n\ndef collectionDroppingAndEmptyCollectionCreating(db):\n try:\n print('Dropping Collection...')\n print(\" Dropping collection 'course'...\")\n db.course.drop()\n except pymongo.errors.ConnectionFailure as error:\n print('Collection Dropping Failed! Error Message: \"{}\"'.format(error))\n print('Collection dropping and empty collection creating are successful')\n\n\n<mask token>\n\n\ndef dataCrawlingHandler():\n url = input('Please input the URL for Data Crawling: ')\n dataCrawling(url)\n\n\ndef dataCrawling(url):\n print('Data Crawling started')\n if str(url).lower() == 'default' or url == '':\n url = 'http://comp4332.com/realistic'\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data from default are inserted into the database'\n )\n else:\n with open('url.txt', 'w') as f:\n f.write(str(url))\n strCommand = 'scrapy crawl ustWebpageSpider'\n subprocess.run(strCommand, shell=True)\n print(\n 'Data Crawling is successful and all data are inserted into the database from: '\n , str(url))\n\n\n<mask token>\n\n\ndef outputCourseDetails(courseCode, lectureSection=0, satisfied=''):\n if satisfied == '':\n cols = ['Course Code', 'Course Title', 'No. of Units/Credits',\n 'Section', 'Date & Time', 'Quota', 'Enrol', 'Avail', 'Wait']\n df = pd.DataFrame({'Course Code': ['COMP1001', 'COMP1021'],\n 'Course Title': ['Exploring Multimedia and Internet Computing',\n 'Introduction to Computer Science'], 'No. of Units/Credits': [3,\n 3], 'Section': ['L1,L2', 'L1'], 'Date & Time': [\n 'Th 03:00PM - 04:50PM', 'TuTh 04:30PM - 05:20PM'], 'Quota': [67,\n 80], 'Enrol': [19, 75], 'Avail': [48, 5], 'Wait': [0, 26]},\n columns=cols)\n print(df)\n return df\n else:\n cols = ['Course Code', 'Course Title', 'No. of Units/Credits',\n 'Section', 'Date & Time', 'Quota', 'Enrol', 'Avail', 'Wait',\n 'Satisfied']\n df = pd.DataFrame({'Course Code': ['COMP1001', 'COMP1021'],\n 'Course Title': ['Exploring Multimedia and Internet Computing',\n 'Introduction to Computer Science'], 'No. 
of Units/Credits': [3,\n 3], 'Section': ['L1,L2', 'L1'], 'Date & Time': [\n 'Th 03:00PM - 04:50PM', 'TuTh 04:30PM - 05:20PM'], 'Quota': [67,\n 80], 'Enrol': [19, 75], 'Avail': [48, 5], 'Wait': [0, 26],\n 'Satisfied': ['Yes', 'No']}, columns=cols)\n print(df)\n return df\n\n\ndef courseSearchByKeywordHandler(db):\n keyword = input('Please input a keyword for searching : ')\n courseSearchByKeyword(db, keyword)\n\n\ndef courseSearchByKeyword(db, keyword):\n keyword = keyword.split()\n keyword = '|'.join(keyword)\n try:\n print('Querying Documents...')\n print(' Finding a list of course which title....')\n listOfCourse = db.course.aggregate([{'$match': {'$or': [{'title': {\n '$regex': keyword}}, {'description': {'$regex': keyword}}, {\n 'colistWith': {'$regex': keyword}}]}}, {'$unwind': '$sections'},\n {'$sort': {'sections.recordTime': 1}}, {'$group': {'_id': {\n 'sid': '$sections.sectionId', 'code': '$code'}, 'code': {\n '$last': '$code'}, 'title': {'$last': '$title'}, 'credits': {\n '$last': '$credits'}, 'sections': {'$last': '$sections'},\n 'description': {'$last': '$description'}}}, {'$sort': {\n 'sections.sectionId': 1}}, {'$group': {'_id': {'code': '$code'},\n 'code': {'$last': '$code'}, 'title': {'$last': '$title'},\n 'credits': {'$last': '$credits'}, 'sections': {'$push': {\n 'sectionId': '$sections.sectionId', 'dateAndTime':\n '$sections.offerings.dateAndTime', 'quota': '$sections.quota',\n 'enrol': '$sections.enrol', 'avail': {'$subtract': [\n '$sections.quota', '$sections.enrol']}, 'wait':\n '$sections.wait'}}, 'description': {'$last': '$description'}}},\n {'$project': {'_id': 0, 'code': 1, 'title': 1, 'credits': 1,\n 'sections': 1, 'description': 1}}])\n recordNo = 0\n for oneCourse in listOfCourse:\n recordNo = recordNo + 1\n print('Record {:d}:'.format(recordNo))\n print(\n \"\"\"code: {:s}\ntitle: {:s}\ncredits: {:0.2f}\nquota: {:d}\nenrol: {:d}\navail: {:d}\nwait: {:d}\"\"\"\n .format(oneCourse['code'], oneCourse['title'], oneCourse[\n 'credits'], oneCourse['sections'][0]['quota'], oneCourse[\n 'sections'][0]['enrol'], oneCourse['sections'][0]['avail'],\n oneCourse['sections'][0]['wait']))\n for oneSection in oneCourse['sections']:\n print('sections: {:s}, Date & Time: {:s}'.format(oneSection\n ['sectionId'], ' '.join(oneSection['dateAndTime'])))\n print('description: {:s}'.format(oneCourse['description']))\n except pymongo.errors.ConnectionFailure as error:\n print('Document Querying Failed! 
Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n\n\ndef courseSearchByWaitingListSizeHandler(db):\n correctInput = False\n while correctInput == False:\n f = input('Please input a non-negative real number: ')\n correctInput = isNonNegativeFloat(f)\n start_ts = input('Please input a Starting Time Slot: ')\n end_ts = input('Please input a Ending Time Slot : ')\n courseSearchByWaitingListSize(db, f, start_ts, end_ts)\n\n\ndef courseSearchByWaitingListSize(db, f, start_ts, end_ts):\n try:\n print('Querying Documents...')\n listOfCourseWithWaitingListSize = db.course.aggregate([{'$unwind':\n '$sections'}, {'$match': {'$and': [{'sections.recordTime': {\n '$gte': datetime.datetime.strptime(start_ts, '%Y-%m-%d %H:%M')}\n }, {'sections.recordTime': {'$lte': datetime.datetime.strptime(\n end_ts, '%Y-%m-%d %H:%M')}}]}}, {'$project': {'code': 1,\n 'title': 1, 'credits': 1, 'sections': 1, 'satisfied': {'$gte':\n ['$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}\n ]}, 'lecSatisfied': {'$cond': [{'$and': [{'$gte': [\n '$sections.wait', {'$multiply': ['$sections.enrol', float(f)]}]\n }, {'$eq': [{'$substr': ['$sections.sectionId', 0, 1]}, 'L']}]},\n 1, 0]}}}, {'$sort': {'sections.sectionId': 1}}, {'$group': {\n '_id': {'code': '$code', 'recordTime': '$sections.recordTime'},\n 'code': {'$last': '$code'}, 'title': {'$last': '$title'},\n 'credits': {'$last': '$credits'}, 'recordTime': {'$last':\n '$sections.recordTime'}, 'sections': {'$push': {'sectionId':\n '$sections.sectionId', 'dateAndTime':\n '$sections.offerings.dateAndTime', 'quota': '$sections.quota',\n 'enrol': '$sections.enrol', 'avail': {'$subtract': [\n '$sections.quota', '$sections.enrol']}, 'wait':\n '$sections.wait', 'satisfied': '$satisfied'}},\n 'lecSatisfiedCount': {'$sum': '$lecSatisfied'}}}, {'$match': {\n 'lecSatisfiedCount': {'$gt': 0}}}, {'$sort': {'recordTime': 1}},\n {'$group': {'_id': {'code': '$code'}, 'code': {'$last': '$code'\n }, 'title': {'$last': '$title'}, 'credits': {'$last':\n '$credits'}, 'recordTime': {'$last': '$recordTime'}, 'sections':\n {'$last': '$sections'}}}, {'$project': {'_id': 0, 'code': 1,\n 'title': 1, 'credits': 1, 'recordTime': 1, 'sections': 1}}])\n recordNo = 0\n for oneCourse in listOfCourseWithWaitingListSize:\n recordNo = recordNo + 1\n print('Record {:d}:'.format(recordNo))\n pprint.pprint(oneCourse)\n except pymongo.errors.ConnectionFailure as error:\n print('Document Querying Failed! 
Error Message: \"{}\"'.format(error))\n\n\n<mask token>\n\n\ndef waitingListSizePredictionHandler(db):\n correctInput = False\n while correctInput == False:\n cc = input('Please input a Course Code: ')\n cc = str(cc).upper()\n correctInput = isCourseCode(cc)\n correctInput = False\n while correctInput == False:\n ln = input('Please input a Lecture number: ')\n correctInput = isIntNumber(ln)\n ts = input('Please input a Time Slot : ')\n N1, N2, N3, N4, N5 = waitingListSizePrediction(cc, ln, ts)\n print('The prediction result \"N1, N2, N3, N4, N5\" from 5 Models:')\n print(N1, ',', N2, ',', N3, ',', N4, ',', N5)\n\n\n<mask token>\n\n\ndef waitingListSizeTraining():\n print('Waiting list size training is successful')\n return courseData\n\n\ndef waitingListSizePrediction(courseCode, lectureNumber, timeslot):\n earliestTime = datetime.datetime.strptime('2018-01-25T09:00Z',\n '%Y-%m-%dT%H:%MZ').timestamp()\n timeslot = int((datetime.datetime.strptime(timeslot, '%Y-%m-%d %H:%M').\n timestamp() - earliestTime) / 1800)\n lectureNumber = int(str(lectureNumber)[-1])\n courseCode = str(courseCode).upper()\n N1 = model4.predictionHandler(courseCode, lectureNumber, timeslot) + 1\n N2 = model4.predictionHandler(courseCode, lectureNumber, timeslot) - 1\n N3 = model4.predictionHandler(courseCode, lectureNumber, timeslot)\n N4 = model4.predictionHandler(courseCode, lectureNumber, timeslot) + 2\n N5 = model5.predictionHandler(courseCode, lectureNumber, timeslot)\n return int(N1), int(N2), int(N3), int(N4), int(N5)\n\n\ndef main():\n try:\n print('Making a MongoDB connection...')\n client = MongoClient('mongodb://localhost:27017')\n print('Getting a database named \"course\"')\n db = client['hkust']\n choice = '0'\n while choice != '6':\n print('')\n print(' Main Menu')\n print('=========================')\n print('1. Collection Dropping and Empty Collection Creating')\n print('2. Data Crawling')\n print('3. Course Search by Keyword')\n print('4. Course Search by Waiting List Size')\n print('5. Waiting List Size Prediction')\n print('6. Exit')\n print('')\n choice = input('Please input your choice (1-6): ')\n print('')\n if choice == '1':\n collectionDroppingAndEmptyCollectionCreatingHandler(db)\n elif choice == '2':\n dataCrawlingHandler()\n elif choice == '3':\n courseSearchByKeywordHandler(db)\n elif choice == '4':\n courseSearchByWaitingListSizeHandler(db)\n elif choice == '5':\n waitingListSizePredictionHandler(db)\n elif choice == '6':\n print('')\n else:\n print('Invalid Input!')\n client.close()\n except pymongo.errors.ConnectionFailure as error:\n print('DB Connection Failed! Error Message: \"{}\"'.format(error))\n\n\nmain()\n",
"step-5": "#!/usr/bin/python\n# The program was written by YU Ho Yung and LEUNG Kin Tung in group 24. \n# The program is written with stubs for the phase 3\n#\nimport pymongo\nfrom scrapy.selector import Selector\nimport pandas as pd\n# import numpy as np\nimport datetime\nimport re\nimport pprint\nimport subprocess\nimport scrapy\nfrom pymongo import MongoClient\nimport model4\nimport model5\n\ndef isNonNegativeFloat(floatNumberInput):\n\ttry:\n\t\tif(float(floatNumberInput) >= 0):\n\t\t\treturn True\n\texcept ValueError as err:\n\t\tprint('Your input is not a non-negative number: ', err)\n\t\t# pass\n\treturn False\n\ndef isCourseCode(corseCode):\n\ttry:\n\t\tmatchObj = re.match(r'[A-Z]?[A-Z]?[A-Z]?[A-Z]?\\d?\\d?\\d?\\d?([A-Z]?)',str(corseCode))\n\t\tif( matchObj != None):\n\t\t\treturn True\n\texcept ValueError as err:\n\t\tprint('Your courseCode is not correct: ', err)\n\t\t# pass\n\treturn False\n\ndef isIntNumber(intNumberInput):\n\ttry:\n\t\tint(intNumberInput)\n\t\treturn True\n\texcept ValueError as err:\n\t\tprint('Your number is not an integer: ', err)\n\t\t# pass\n\treturn False\n\t\n'''\n\t5.1 Collection Dropping and Empty Collection Creating\n\t(This feature will be used for the demonstration purpose.\n\tThe detailed implementation of this feature will be completed by you in Phase 3.)\n\t\n\tInput:\n\tnone\n\t\n\tOutput:\n\tDisplay a message “Collection dropping and empty collection creating are successful”\n\t(after the collection(s) is(are) removed and the new empty collection(s) is(are) created).\n'''\n# to handle the function \"Collection Dropping and Empty Collection Creating\"\ndef collectionDroppingAndEmptyCollectionCreatingHandler(db):\n\t# to execute the function \"update address\"\n\tcollectionDroppingAndEmptyCollectionCreating(db)\n\ndef collectionDroppingAndEmptyCollectionCreating(db):\n\t#A function to drop and empty all collections\n\n\t# Dropping Collection\n\n\ttry:\n\t\tprint(\"Dropping Collection...\")\n\t\tprint(\" Dropping collection \\'course\\'...\")\n\t\tdb.course.drop()\n\texcept pymongo.errors.ConnectionFailure as error: \n\t\tprint(\"Collection Dropping Failed! 
Error Message: \\\"{}\\\"\".format(error))\n\n\tprint(\"Collection dropping and empty collection creating are successful\")\n\n'''\n\t5.2 Data Crawling\n\t(The detailed implementation of this feature will be completed by you in Phase 3.)\n\n\tInput:\n\ta URL (e.g., “http://course.cse.ust.hk/comp4332/index.html”) or\n\ta special keyword (i.e., “default”)\n\t\n\tOutput:\n\tIf the input is “default”, display a message “Data Crawling is successful and all data are inserted into the database”\n\t(after all data are crawled from the default URL given in the project webpage and are inserted into the database).\n\tOtherwise, do the same prompt operation but the URL used is the URL typed in the input.\n'''\ndef dataCrawlingHandler():\n\turl = input(\"Please input the URL for Data Crawling: \")\n\tdataCrawling(url)\n\ndef dataCrawling(url):\n\tprint(\"Data Crawling started\")\n\tif(str(url).lower() == \"default\" or url == \"\"):\n\t\t#implement the crawling function from default website\n\t\t# Inserting Documents\n\t\turl = 'http://comp4332.com/realistic'\n\t\t#testing \n\t\t# url = 'http://comp4332.com/trial'\n\t\twith open('url.txt', 'w') as f: #Please double check if this works\n\t\t\tf.write(str(url))\n\t\tstrCommand = \"scrapy crawl ustWebpageSpider\" #referred to ustWebpageSpider.py\n\t\tsubprocess.run(strCommand, shell=True)\n\t\tprint(\"Data Crawling is successful and all data from default are inserted into the database\")\n\telse:\n\t\twith open('url.txt', 'w') as f: #Please double check if this works\n\t\t\tf.write(str(url))\n\t\tstrCommand = \"scrapy crawl ustWebpageSpider\" #referred to ustWebpageSpider.py\n\t\tsubprocess.run(strCommand, shell=True)\n\t\t# implement the crawling function from the given url\n\t\tprint(\"Data Crawling is successful and all data are inserted into the database from: \", str(url))\n\t\t#The detailed implementation of this feature will be completed in Phase 3\n\n'''\n\t5.3 Course Search\n\t(The detailed implementation of this feature will be completed by you in Phase 4.\n\tBut, the design of this feature will be completed in Phase 2.)\n\tWe have the following two operations for a course search.\n\t1. Course Search by Keyword\n\t2. Course Search by Waiting List Size\n\tNote: Although there are some missing data in this project (which may require “prediction”),\n\tin this part/feature, you just perform these operations for a course search only based on the data given to you.\n\tThere is no need to perform any “prediction” in this part.\n'''\n#def courseSearch():\n#This is just an abstraction here, not even a stub.\n#The detailed implementation of this feature will be completed in Phase 4.\n\n'''\n\t5.3.1 Course Search by Keyword\n\n\tInput: \n\ta text (k) where this text is called “keyword(s)”\n\n\tOutput: \n\tA list of courses which course titles, course description or course remarks match the given text k.\n\tIn the output, for each course, please show “Course Code”, “Course Title”, “No. 
of Units/Credits”,\n\ta list of sections of the course each with “Section”, “Date & Time”, “Quota”, “Enrol”, “Avail” and “Wait”.\n\tPlease sort the list of courses in ascending order of “Course Code”.\n\t(Within a single course, please sort in ascending order of “Sections”)\n\tWe say that a phrase P matches text k if at least one of the words in phrase P is equal to one of words in k.\n\tFor example, if P = “Big Data Mining and Management” and k = “Mining”, then P matches k.\n\tIf P = “Big Data Mining and Management” and k = “Risk Mining”, then P matches k too.\n\tIf P = “Big Data Mining and Management” and k = “Mining Management”, then P matches k.\n'''\n\t# \"lectureSection\" is optional \n\t# \"satisfied\" is optional\ndef outputCourseDetails(courseCode, lectureSection = 0, satisfied = \"\"):\n\t#: search the course code which match in database\n\t#TODO: print the Course Details of the Course Code\n\tif(satisfied == \"\"):\n\t\tcols = [\"Course Code\", \"Course Title\", \"No. of Units/Credits\", \"Section\", \"Date & Time\", \"Quota\", \"Enrol\", \"Avail\",\"Wait\"]\n\t\tdf = pd.DataFrame({\"Course Code\" : [\"COMP1001\", \"COMP1021\"],\"Course Title\": [\"Exploring Multimedia and Internet Computing\",\"Introduction to Computer Science\"],\"No. of Units/Credits\":[3,3], \"Section\":[\"L1,L2\",\"L1\"], \"Date & Time\":[\"Th 03:00PM - 04:50PM\",\"TuTh 04:30PM - 05:20PM\"], \"Quota\":[67,80], \"Enrol\":[19,75], \"Avail\":[48,5],\"Wait\":[0,26]},columns=cols)\n\t\tprint(df)\n\t\t#return df.values.tolist()\n\t\treturn df\n\telse:\n\t\tcols = [\"Course Code\", \"Course Title\", \"No. of Units/Credits\", \"Section\", \"Date & Time\", \"Quota\", \"Enrol\", \"Avail\",\"Wait\", \"Satisfied\"]\n\t\tdf = pd.DataFrame({\"Course Code\" : [\"COMP1001\", \"COMP1021\"],\"Course Title\": [\"Exploring Multimedia and Internet Computing\",\"Introduction to Computer Science\"],\"No. of Units/Credits\":[3,3], \"Section\":[\"L1,L2\",\"L1\"], \"Date & Time\":[\"Th 03:00PM - 04:50PM\",\"TuTh 04:30PM - 05:20PM\"], \"Quota\":[67,80], \"Enrol\":[19,75], \"Avail\":[48,5],\"Wait\":[0,26], \"Satisfied\":[\"Yes\",\"No\"]},columns=cols)\n\t\tprint(df)\n\t\t#return df.values.tolist()\n\t\treturn df\n\ndef courseSearchByKeywordHandler(db):\n\tkeyword = input(\"Please input a keyword for searching : \")\n\tcourseSearchByKeyword(db,keyword)\n\ndef courseSearchByKeyword(db,keyword):\n\tkeyword = keyword.split() \n\tkeyword = \"|\".join(keyword)\n\t#TODO:Use the keyword to find a list of courses. 
\n\t#The keyword will be searched in course titles, course description or course remarks.\n\ttry:\n\t\tprint(\"Querying Documents...\")\t\t\t\n\t\tprint(\" Finding a list of course which title....\")\t\n\t\t# listOfCourse = db.course.find()\n\t\tlistOfCourse = db.course.aggregate([\n\t\t\t\t{\n\t\t\t\t\t\"$match\": {\n\t\t\t\t\t\t\"$or\": [\n\t\t\t\t\t\t\t\t{\"title\": {'$regex': keyword}},\n\t\t\t\t\t\t\t\t{\"description\": {'$regex': keyword}},\n\t\t\t\t\t\t\t\t{\"colistWith\": {'$regex': keyword}}\n\t\t\t\t\t\t\t]\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"$unwind\": \"$sections\"\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"$sort\": {\"sections.recordTime\": 1 }\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"$group\":{\n\t\t\t\t\t\t\"_id\":{\"sid\":\"$sections.sectionId\", \"code\": \"$code\"},\n\t\t\t\t\t\t\"code\": {\"$last\": \"$code\"},\n\t\t\t\t\t\t\"title\": {\"$last\": \"$title\"},\n\t\t\t\t\t\t\"credits\": {\"$last\": \"$credits\"},\n\t\t\t\t\t\t\"sections\":{\"$last\": \"$sections\"},\n\t\t\t\t\t\t\"description\":{\"$last\":\"$description\"}\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"$sort\": {\"sections.sectionId\": 1 }\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"$group\":{\n\t\t\t\t\t\t\"_id\":{ \"code\": \"$code\"},\n\t\t\t\t\t\t\"code\": {\"$last\": \"$code\"},\n\t\t\t\t\t\t\"title\": {\"$last\": \"$title\"},\n\t\t\t\t\t\t\"credits\": {\"$last\": \"$credits\"},\n\t\t\t\t\t\t\"sections\":{\n\t\t\t\t\t\t\t\"$push\": {\n\t\t\t\t\t\t\t\t\"sectionId\":\"$sections.sectionId\",\n\t\t\t\t\t\t\t\t\"dateAndTime\":\"$sections.offerings.dateAndTime\",\n\t\t\t\t\t\t\t\t\"quota\":\"$sections.quota\",\n\t\t\t\t\t\t\t\t\"enrol\":\"$sections.enrol\",\n\t\t\t\t\t\t\t\t\"avail\": { \"$subtract\": [ \"$sections.quota\", \"$sections.enrol\"] } ,\n\t\t\t\t\t\t\t\t\"wait\":\"$sections.wait\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"description\":{\"$last\":\"$description\"}\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"$project\":{\"_id\":0,\"code\":1,\"title\":1,\"credits\":1,\"sections\":1,\"description\":1}\n\t\t\t\t}\n\t\t])\n\t\trecordNo = 0\n\t\tfor oneCourse in listOfCourse:\n\t\t\trecordNo = recordNo + 1\n\t\t\tprint(\"Record {:d}:\".format(recordNo))\n\t\t\tprint(\"code: {:s}\\ntitle: {:s}\\ncredits: {:0.2f}\\nquota: {:d}\\nenrol: {:d}\\navail: {:d}\\nwait: {:d}\".format(oneCourse[\"code\"], oneCourse[\"title\"], oneCourse[\"credits\"],oneCourse[\"sections\"][0][\"quota\"],oneCourse[\"sections\"][0][\"enrol\"],oneCourse[\"sections\"][0][\"avail\"],oneCourse[\"sections\"][0][\"wait\"]))\n\t\t\tfor oneSection in oneCourse[\"sections\"]:\n\t\t\t\tprint(\"sections: {:s}, Date & Time: {:s}\".format(oneSection[\"sectionId\"],' '.join(oneSection[\"dateAndTime\"])))\t\t\t\n\t\t\tprint(\"description: {:s}\".format(oneCourse[\"description\"]))\n\t\t\t# pprint.pprint(oneCourse)\n\texcept pymongo.errors.ConnectionFailure as error: \n\t\tprint(\"Document Querying Failed! 
Error Message: \\\"{}\\\"\".format(error))\n\n\t# courseCode = \"COMP1001\"\n\t# return outputCourseDetails(courseCode)\n\n'''\n\t5.3.2 Course Search by Waiting List Size\n\t\n\tInput:\n\tA non-negative real number f\n\tStarting Time Slot (start_ts)\n\tEnding Time Slot (end_ts)\n\t\n\tOutput:\n\tA list of courses each of which has a lecture section (e.g., “L1” and “L2”) in a time slot,\n\tsays match_ts,between start_ts (inclusively) and end_ts (inclusively)\n\twhere \n\tthe number of students in the waiting list of this lecture section is\n\tgreater than or equal to \n\tf multiplied by the number of students enrolled in this lecture section in that timeslot.\n\tIn the output, for each “distinct” course, please show \n\t“Course Code”, \n\tCourse Title”, \n\t“No. of Units/Credits”, \n\t“Matched Time Slot”,\n\ta list of sections (including both lecture 9/17 COMP4332/RMBI4310 Project (Spring 2018) Course Registration Data Analytics\n\tsections and non-lecture sections) \n\tof the course each with “Section”, \n\t\t“Date & Time”, \n\t\t“Quota”, \n\t\t“Enrol”, \n\t\t“Avail”, \n\t\t“Wait” and \n\t\t“Satisfied”\n\t\t(all shown with the content/values recorded in the time slot match_ts).\n\tNote that “Matched Time Slot” is a new attribute in this query and it is equal to match_ts.\n\tIf a single course satisfies the required condition in multiple time slots \n\t(no matter which lecture section of this course satisfies the required condition),\n\twe just show the latest time slot among all these time slots in which this course satisfies the required condition.\n\tThus, each course should appear at most once in the output.\n\t\n\tNote that “Satisfied” is another new attribute in this query.\n\tIt is equal to “Yes” \n\tif the number of students in the waiting list of this section\n\tis greater than or equal to \n\tf multiplied by the number ofstudents enrolled in this section in that time slot.\n\tIt is equal to “No” otherwise.\n\t\n\tAttribute “Satisfied” is not needed to be considered in Phase 2.\n\t\n\tPlease sort the list of courses in ascending order of “Course Code”.\n\t(Within a single course, please sort in ascending order of “Sections”)\n'''\ndef courseSearchByWaitingListSizeHandler(db):\n\tcorrectInput = False\n\twhile(correctInput == False):\n\t\tf = input(\"Please input a non-negative real number: \")\n\t\tcorrectInput = isNonNegativeFloat(f)\n\tstart_ts = input(\"Please input a Starting Time Slot: \")\n\tend_ts = input(\"Please input a Ending Time Slot : \")\n\tcourseSearchByWaitingListSize(db, f, start_ts, end_ts)\n\n# A non-negative real number f\n# Starting Time Slot (start_ts)\n# Ending Time Slot (end_ts)\ndef courseSearchByWaitingListSize(db, f, start_ts, end_ts):\n\t#TODO: A function uses the Waiting List Size number to find a list of courses and output a list of course code with lecture section\n\n\t# satisfied = \"Yes\"\n\t# f = 0.01\n\ttry:\n\t\tprint(\"Querying Documents...\")\n\t\tlistOfCourseWithWaitingListSize = db.course.aggregate([\t\n\t\t\t{ \"$unwind\": \"$sections\" },\n\t\t\t{\"$match\": \n\t\t\t\t{\"$and\":[\n\t\t\t\t\t{\"sections.recordTime\": {\"$gte\": datetime.datetime.strptime(start_ts, \"%Y-%m-%d %H:%M\")}},\n\t\t\t\t\t{\"sections.recordTime\": {\"$lte\": datetime.datetime.strptime(end_ts, \"%Y-%m-%d %H:%M\")}}\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\t},\n\t\t\t{ \"$project\": \n\t\t\t\t{\"code\": 1,\n\t\t\t\t\"title\": 1,\n\t\t\t\t\"credits\": 
1,\n\t\t\t\t\"sections\":1,\n\t\t\t\t\"satisfied\":{\"$gte\":[\"$sections.wait\",{\"$multiply\":[\"$sections.enrol\",float(f)]}]},\n\t\t\t\t\"lecSatisfied\":{\n\t\t\t\t\t\"$cond\":[{\n\t\t\t\t\t\t\"$and\":[\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"$gte\":[\"$sections.wait\",{\"$multiply\":[\"$sections.enrol\",float(f)]}]\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"$eq\":[{\"$substr\": [\"$sections.sectionId\",0,1]},\"L\"]\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t]\n\t\t\t\t\t},1,0]\n\t\t\t\t}\n\t\t\t\t},\n\t\t\t}, \n\t\t\t{\n\t\t\t\t\"$sort\": {\"sections.sectionId\": 1 }\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"$group\":{\n\t\t\t\t\t\"_id\":{ \"code\": \"$code\", \"recordTime\":\"$sections.recordTime\"},\n\t\t\t\t\t\"code\": {\"$last\": \"$code\"},\n\t\t\t\t\t\"title\": {\"$last\": \"$title\"},\n\t\t\t\t\t\"credits\": {\"$last\": \"$credits\"},\n\t\t\t\t\t\"recordTime\":{\"$last\": \"$sections.recordTime\"},\n\t\t\t\t\t\"sections\":{\n\t\t\t\t\t\t\"$push\": {\n\t\t\t\t\t\t\t\"sectionId\":\"$sections.sectionId\",\n\t\t\t\t\t\t\t\"dateAndTime\":\"$sections.offerings.dateAndTime\",\n\t\t\t\t\t\t\t\"quota\":\"$sections.quota\",\n\t\t\t\t\t\t\t\"enrol\":\"$sections.enrol\",\n\t\t\t\t\t\t\t\"avail\": { \"$subtract\": [ \"$sections.quota\", \"$sections.enrol\"] } ,\n\t\t\t\t\t\t\t\"wait\":\"$sections.wait\",\n\t\t\t\t\t\t\t\"satisfied\":\"$satisfied\",\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"lecSatisfiedCount\":{\"$sum\":\"$lecSatisfied\"}\n\t\t\t\t}\n\t\t\t},\n\t\t\t{ \"$match\": {\"lecSatisfiedCount\": {\"$gt\":0}} \n\t\t\t},\n\t\t\t{\n\t\t\t\t\"$sort\": {\"recordTime\": 1 }\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"$group\":{\n\t\t\t\t\t\"_id\":{ \"code\": \"$code\"},\n\t\t\t\t\t\"code\": {\"$last\": \"$code\"},\n\t\t\t\t\t\"title\": {\"$last\": \"$title\"},\n\t\t\t\t\t\"credits\": {\"$last\": \"$credits\"},\n\t\t\t\t\t\"recordTime\":{\"$last\": \"$recordTime\"},\n\t\t\t\t\t\"sections\":{\"$last\": \"$sections\"}\n\t\t\t\t}\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"$project\":{\n\t\t\t\t\t\"_id\":0,\n\t\t\t\t\t\"code\": 1,\n\t\t\t\t\t\"title\":1,\n\t\t\t\t\t\"credits\": 1,\n\t\t\t\t\t\"recordTime\":1,\n\t\t\t\t\t\"sections\":1\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t\t)\n\t\trecordNo = 0\n\t\tfor oneCourse in listOfCourseWithWaitingListSize:\n\t\t\trecordNo = recordNo + 1\n\t\t\tprint(\"Record {:d}:\".format(recordNo))\n\t\t\tpprint.pprint(oneCourse)\n\t\t\t# print(\"code: {:s}\\ntitle: {:s}\\ncredits: {:0.2f}\\nquota: {:d}\\nenrol: {:d}\\navail: {:d}\\nwait: {:d}\".format(oneCourse[\"code\"], oneCourse[\"title\"], oneCourse[\"credits\"],oneCourse[\"sections\"][0][\"quota\"],oneCourse[\"sections\"][0][\"enrol\"],oneCourse[\"sections\"][0][\"avail\"],oneCourse[\"sections\"][0][\"wait\"]))\n\t\t\t# for oneSection in oneCourse[\"sections\"]:\n\t\t\t# \tprint(\"sections: {:s}, Date & Time: {:s}\".format(oneSection[\"sectionId\"],' '.join(oneSection[\"dateAndTime\"])))\t\t\t\n\t\t\t# print(\"description: {:s}\".format(oneCourse[\"description\"]))\n\t\t\t#pprint(\" Record {:d}: (sid={:s}, sname={:s}, byear={:d})\".format(recordNo, oneStudent[\"sid\"], oneStudent[\"sname\"], oneStudent[\"byear\"]))\n\t\t\t#print(\"Record {:d}: (course={:s})\".format(recordNo, oneCourse))\t\n\texcept pymongo.errors.ConnectionFailure as error: \n\t\tprint(\"Document Querying Failed! Error Message: \\\"{}\\\"\".format(error))\n\t#return outputCourseDetails(courseCode, lectureSection, satisfied)\n\n'''\n\t5.4 Waiting List Size Prediction\n\t(The detailed implementation of this feature will be completed by you in Phase 6. 
\n\tBut, the design of this feature will be completed in Phase 5.)\n\n\tInput: \n\tCourse Code (cc)\n\tLecture number (ln) (e.g., the input should be “1” denoting “L1”) \n\tTime Slot (ts)\n\n\tOutput: \n\t“N1,N2,N3,N4,N5”\n\twhere Ni denotes the number of students in the waiting list of the lecture number (ln) (if any) of the course cc \n\tin the given time slot (ts) predicted by Model i for each i in [1, 5] \n\t(Note that these 5 numbers are integers.)\n\tNote: Since we know that training a model may take some time, in general, “cc” could be any course code. \n\tHowever, in our demonstration, we will test with the course code “cc” starting from “COMP1942”, “COMP42”, “COMP43” or “RMBI” only \n\t(i.e., (1) the COMP course (“COMP1942”), (2) any COMP course with starting course digits equal to “42” or “43” and (3) any RMBI course). \n\tThus, you just need to train your model with the data from the course with the course code “cc” starting from\n\tthese course code prefixes described above before our demonstration. \n\tWhen we use this feature in our demonstration, you just need to load the trained model and perform the prediction of this feature based on the trained model.\n\tIf there is no lecture section of the course (cc) specified in the input or if the lecture number entered (ln) is not offered for the course (cc) specified in the input, \n\twe just need to show “There is no lecture section and thus there is no prediction result.”\n\tAlthough it is possible that we could use 5 models for one course and we could also use 5 “different” models for another course, \n\tfor the sake of simplicity, please use the same 5 models for any course needed. \n\tOf course, even if the 5 models are the same (with the same set of “input” parameter values) for any two courses, \n\twe know that each of the 5 models could be trained with different enrollment data from different courses, resulting in different “model” parameter values \n\t(e.g., the weight values between neurons in a neural network which should be found from the data).\n'''\ndef\twaitingListSizePredictionHandler(db):\n\tcorrectInput = False\n\twhile(correctInput == False):\n\t\tcc = input(\"Please input a Course Code: \")\n\t\tcc = str(cc).upper()\n\t\tcorrectInput= isCourseCode(cc)\n\tcorrectInput = False\n\twhile(correctInput == False):\n\t\tln = input(\"Please input a Lecture number: \")\n\t\tcorrectInput = isIntNumber(ln)\n\tts = input(\"Please input a Time Slot : \")\n\tN1, N2, N3, N4, N5 = waitingListSizePrediction(cc,ln,ts)\n\tprint(\"The prediction result \\\"N1, N2, N3, N4, N5\\\" from 5 Models:\")\n\tprint(N1,\",\", N2,\",\", N3,\",\", N4,\",\", N5)\n\n\n'''\n5.5 Waiting List Size Training\n\t(This feature will be used for your “own” training purpose before we have the real feature from Section 5.4.\n\tThe detailed implementation of this feature will be completed by you in Phase 6. 
\n\tBut, the design of this feature will be completed in Phase 5.)\n\t\n\tInput: \n\tnone\n\n\tOutput: \n\tDisplay a message “Waiting list size training is successful” (after the training process on the waiting list size finishes).\n'''\n\n\ndef waitingListSizeTraining():\n\t#TODO: The function for the training process on the waiting list size\n\tprint(\"Waiting list size training is successful\")\n\treturn courseData\n\n# Course Code (cc)\n# Lecture number (ln) (e.g., the input should be “1” denoting “L1”)\n# Time Slot (ts) \ndef\twaitingListSizePrediction(courseCode,lectureNumber, timeslot):\n\t# courseData = waitingListSizeTraining()\n\t#TODO: Create 5 model to find the prediction\n\t# timeslot = \"2018-01-26 22:30\"\n\tearliestTime = datetime.datetime.strptime(\"2018-01-25T09:00Z\", \"%Y-%m-%dT%H:%MZ\").timestamp()\n\ttimeslot = int((datetime.datetime.strptime(timeslot, \"%Y-%m-%d %H:%M\").timestamp() - earliestTime)/1800)\n\tlectureNumber= int(str(lectureNumber)[-1])\n\tcourseCode = str(courseCode).upper()\n\n\t# print(courseData)\n\tN1 = model4.predictionHandler(courseCode,lectureNumber, timeslot)+1\n\tN2 = model4.predictionHandler(courseCode,lectureNumber, timeslot)-1\n\tN3 = model4.predictionHandler(courseCode,lectureNumber, timeslot)\n\tN4 = model4.predictionHandler(courseCode,lectureNumber, timeslot)+2\n\tN5 = model5.predictionHandler(courseCode,lectureNumber, timeslot)\n\t#There are 5 Models to predict 5 different (int) result\n\t#N1, N2, N3, N4, N5 = 11,12,11,14,13\n\treturn int(N1), int(N2), int(N3), int(N4), int(N5)\n\n# to display the system interface with stubs\ndef main():\n\ttry:\n\t\t# Making a DB connection\n\t\tprint(\"Making a MongoDB connection...\")\n\t\tclient = MongoClient(\"mongodb://localhost:27017\")\n\t\t\n\t\t# Getting a Database named \"course\"\n\t\tprint(\"Getting a database named \\\"course\\\"\")\n\t\tdb = client[\"hkust\"]\n\n\t\t# here, we need to implement for the flow\n\t\t# display the menu\n\t\tchoice = \"0\"\n\t\twhile (choice != \"6\"):\n\t\t\tprint(\"\")\n\t\t\tprint(\" Main Menu\")\n\t\t\tprint(\"=========================\")\n\t\t\tprint(\"1. Collection Dropping and Empty Collection Creating\")\n\t\t\tprint(\"2. Data Crawling\")\n\t\t\tprint(\"3. Course Search by Keyword\")\n\t\t\tprint(\"4. Course Search by Waiting List Size\")\n\t\t\tprint(\"5. Waiting List Size Prediction\")\n\t\t\tprint(\"6. Exit\")\n\t\t\tprint(\"\")\n\t\t\t# allow the user to choose one of the functions in the menu\n\t\t\tchoice = input(\"Please input your choice (1-6): \")\n\t\t\tprint(\"\")\n\t\t\t# check the input and call the correspondence function\n\t\t\tif (choice == \"1\"):\n\t\t\t\tcollectionDroppingAndEmptyCollectionCreatingHandler(db)\n\t\t\telif (choice == \"2\"):\n\t\t\t\tdataCrawlingHandler()\n\t\t\telif (choice == \"3\"):\n\t\t\t\tcourseSearchByKeywordHandler(db)\n\t\t\telif (choice == \"4\"):\n\t\t\t\tcourseSearchByWaitingListSizeHandler(db)\n\t\t\telif (choice == \"5\"):\n\t\t\t\twaitingListSizePredictionHandler(db)\n\t\t\telif (choice == \"6\"):\n\t\t\t\tprint(\"\")\n\t\t\telse:\n\t\t\t\tprint(\"Invalid Input!\")\n\t\t\tclient.close()\n\texcept pymongo.errors.ConnectionFailure as error: \n\t\tprint(\"DB Connection Failed! Error Message: \\\"{}\\\"\".format(error))\t\n\nmain()\n",
"step-ids": [
6,
13,
14,
18,
19
]
}
|
[
6,
13,
14,
18,
19
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Book', fields=[('name',
models.CharField(max_length=250)), ('slug', models.SlugField(
max_length=25, primary_key=True, serialize=False, unique=True)), (
'author', models.CharField(max_length=250)), ('was_buplished',
models.DateField())]), migrations.CreateModel(name='Alias', fields=
[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('alias', models.CharField(
max_length=250)), ('start', models.DateTimeField()), ('end', models
.DateTimeField(default=None)), ('target', models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [migrations.CreateModel(name='Book', fields=[('name',
models.CharField(max_length=250)), ('slug', models.SlugField(
max_length=25, primary_key=True, serialize=False, unique=True)), (
'author', models.CharField(max_length=250)), ('was_buplished',
models.DateField())]), migrations.CreateModel(name='Alias', fields=
[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('alias', models.CharField(
max_length=250)), ('start', models.DateTimeField()), ('end', models
.DateTimeField(default=None)), ('target', models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]
<|reserved_special_token_1|>
# Generated by Django 3.1.6 on 2021-02-15 12:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('name', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=25, primary_key=True, serialize=False, unique=True)),
('author', models.CharField(max_length=250)),
('was_buplished', models.DateField()),
],
),
migrations.CreateModel(
name='Alias',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('alias', models.CharField(max_length=250)),
('start', models.DateTimeField()),
('end', models.DateTimeField(default=None)),
('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='alias.book')),
],
),
]
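# Editor's note (hedged sketch): the models.py these operations imply -- field names
# and options are read directly from the migration above; the app label 'alias' and
# the module layout are assumptions. Kept as comments so this file stays a plain
# migration module:
#
#   from django.db import models
#
#   class Book(models.Model):
#       name = models.CharField(max_length=250)
#       slug = models.SlugField(max_length=25, primary_key=True, unique=True)
#       author = models.CharField(max_length=250)
#       was_buplished = models.DateField()  # spelling kept as in the migration
#
#   class Alias(models.Model):
#       alias = models.CharField(max_length=250)
#       start = models.DateTimeField()
#       end = models.DateTimeField(default=None)
#       target = models.ForeignKey(Book, on_delete=models.PROTECT)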
|
flexible
|
{
"blob_id": "6239cb08509b8e84a88db95479af05845876d9b6",
"index": 1502,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Book', fields=[('name',\n models.CharField(max_length=250)), ('slug', models.SlugField(\n max_length=25, primary_key=True, serialize=False, unique=True)), (\n 'author', models.CharField(max_length=250)), ('was_buplished',\n models.DateField())]), migrations.CreateModel(name='Alias', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('alias', models.CharField(\n max_length=250)), ('start', models.DateTimeField()), ('end', models\n .DateTimeField(default=None)), ('target', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Book', fields=[('name',\n models.CharField(max_length=250)), ('slug', models.SlugField(\n max_length=25, primary_key=True, serialize=False, unique=True)), (\n 'author', models.CharField(max_length=250)), ('was_buplished',\n models.DateField())]), migrations.CreateModel(name='Alias', fields=\n [('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('alias', models.CharField(\n max_length=250)), ('start', models.DateTimeField()), ('end', models\n .DateTimeField(default=None)), ('target', models.ForeignKey(\n on_delete=django.db.models.deletion.PROTECT, to='alias.book'))])]\n",
"step-5": "# Generated by Django 3.1.6 on 2021-02-15 12:13\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Book',\n fields=[\n ('name', models.CharField(max_length=250)),\n ('slug', models.SlugField(max_length=25, primary_key=True, serialize=False, unique=True)),\n ('author', models.CharField(max_length=250)),\n ('was_buplished', models.DateField()),\n ],\n ),\n migrations.CreateModel(\n name='Alias',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('alias', models.CharField(max_length=250)),\n ('start', models.DateTimeField()),\n ('end', models.DateTimeField(default=None)),\n ('target', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='alias.book')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(str1 and str2)
<|reserved_special_token_0|>
for c in str1:
if c in str2:
nPos = str1.index(c)
break
print(nPos)
<|reserved_special_token_1|>
str1 = '12345678'
str2 = '456'
print(str1 and str2)
str1 = 'cekjgdklab'
str2 = 'gka'
nPos = -1
for c in str1:
if c in str2:
nPos = str1.index(c)
break
print(nPos)
<|reserved_special_token_1|>
# strspn(str1,str2)
str1 = '12345678'
str2 = '456'
# str1 and chars both in str1 and str2
print(str1 and str2)
str1 = 'cekjgdklab'
str2 = 'gka'
nPos = -1
for c in str1:
if c in str2:
nPos = str1.index(c)
break
print(nPos)
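# Editor's note (hedged): 'print(str1 and str2)' above just prints str2 ('456'),
# because 'and' returns its second operand when the first is truthy -- it does not
# compute the characters common to both strings. For comparison, a minimal sketch
# of a C-style strspn (length of the leading run of str1 built only from characters
# of str2); the function name is illustrative:
def strspn_sketch(s1, s2):
    n = 0
    for ch in s1:
        if ch not in s2:
            break
        n += 1
    return n
# strspn_sketch('12345678', '456') == 0; strspn_sketch('45699', '456') == 3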
|
flexible
|
{
"blob_id": "5c30b0e952ddf2e05a7ad5f8d9bbd4f5e22f887d",
"index": 62,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(str1 and str2)\n<mask token>\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n",
"step-3": "str1 = '12345678'\nstr2 = '456'\nprint(str1 and str2)\nstr1 = 'cekjgdklab'\nstr2 = 'gka'\nnPos = -1\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n",
"step-4": "# strspn(str1,str2)\nstr1 = '12345678'\nstr2 = '456'\n# str1 and chars both in str1 and str2\nprint(str1 and str2)\n\nstr1 = 'cekjgdklab'\nstr2 = 'gka'\nnPos = -1\nfor c in str1:\n if c in str2:\n nPos = str1.index(c)\n break\nprint(nPos)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from basic_app.models import UserProfileInfo
admin.site.register(UserProfileInfo)
# we do not need to register User() default form since it comes
# with the default admin site in Django itself.
|
normal
|
{
"blob_id": "624212a1d73ff3a3b3092ffa27912a6ae25a2484",
"index": 6826,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(UserProfileInfo)\n",
"step-3": "from django.contrib import admin\nfrom basic_app.models import UserProfileInfo\nadmin.site.register(UserProfileInfo)\n",
"step-4": "from django.contrib import admin\nfrom basic_app.models import UserProfileInfo\n\nadmin.site.register(UserProfileInfo)\n\n# we do not need to register User() default form since it comes\n# with the default admin site in Django itself.\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Read all the images from a directory,
resize, rescale and rename them.
"""
|
normal
|
{
"blob_id": "670efbd9879099b24a87e19a531c4e3bbce094c6",
"index": 1666,
"step-1": "<mask token>\n",
"step-2": "\n\n\"\"\"\nRead all the images from a directory,\nresize, rescale and rename them.\n\"\"\"\n\n\n\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print(
'Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos'
)
<|reserved_special_token_0|>
while count < 11:
numero = int(input('Introduzca su %d numero:' % count))
lista.append(numero)
count = count + 1
<|reserved_special_token_0|>
for element in lista:
if element > 999 or element < -999:
listanueva.append(lista.index(element))
<|reserved_special_token_0|>
print('Los numeros con mas de tres digitos se encuentran en las posiciones',
posiciones)
input()
<|reserved_special_token_1|>
print(
'Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos'
)
count = 1
lista = []
while count < 11:
numero = int(input('Introduzca su %d numero:' % count))
lista.append(numero)
count = count + 1
listanueva = []
s = ','
for element in lista:
if element > 999 or element < -999:
listanueva.append(lista.index(element))
posiciones = ','.join(str(x) for x in listanueva)
print('Los numeros con mas de tres digitos se encuentran en las posiciones',
posiciones)
input()
<|reserved_special_token_1|>
print("Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos")
count=1
lista=[]
while count<11:
numero=int(input('Introduzca su %d numero:' %(count)))
lista.append(numero)
count=count+1
listanueva=[]
s= ','
for element in lista:
if element > 999 or element<-999:
listanueva.append(lista.index(element))
posiciones= ','.join(str (x) for x in listanueva)
print("Los numeros con mas de tres digitos se encuentran en las posiciones",posiciones)
input()
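# Editor's note (hedged): lista.index(element) returns the position of the FIRST
# occurrence, so a repeated value with more than 3 digits would be reported at the
# wrong position. An enumerate-based variant avoids that (equivalent logic, sketch):
#   listanueva = [i for i, n in enumerate(lista) if abs(n) > 999]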
|
flexible
|
{
"blob_id": "9dd5db441044c808274493f16a912d1b65a6c28b",
"index": 5911,
"step-1": "<mask token>\n",
"step-2": "print(\n 'Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos'\n )\n<mask token>\nwhile count < 11:\n numero = int(input('Introduzca su %d numero:' % count))\n lista.append(numero)\n count = count + 1\n<mask token>\nfor element in lista:\n if element > 999 or element < -999:\n listanueva.append(lista.index(element))\n<mask token>\nprint('Los numeros con mas de tres digitos se encuentran en las posiciones',\n posiciones)\ninput()\n",
"step-3": "print(\n 'Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos'\n )\ncount = 1\nlista = []\nwhile count < 11:\n numero = int(input('Introduzca su %d numero:' % count))\n lista.append(numero)\n count = count + 1\nlistanueva = []\ns = ','\nfor element in lista:\n if element > 999 or element < -999:\n listanueva.append(lista.index(element))\nposiciones = ','.join(str(x) for x in listanueva)\nprint('Los numeros con mas de tres digitos se encuentran en las posiciones',\n posiciones)\ninput()\n",
"step-4": "print(\"Leer 10 números enteros, almacenarlos en un vector y determinar en qué posiciones se encuentran los números con mas de 3 dígitos\")\r\n\r\n\r\ncount=1\r\nlista=[]\r\nwhile count<11: \r\n numero=int(input('Introduzca su %d numero:' %(count)))\r\n lista.append(numero)\r\n count=count+1\r\nlistanueva=[]\r\ns= ','\r\n \r\nfor element in lista:\r\n\r\n if element > 999 or element<-999:\r\n listanueva.append(lista.index(element))\r\n\r\nposiciones= ','.join(str (x) for x in listanueva)\r\nprint(\"Los numeros con mas de tres digitos se encuentran en las posiciones\",posiciones)\r\ninput()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
routeList = ('payment', PaymentViewSet),
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from rolca.payment.api.views import PaymentViewSet
routeList = ('payment', PaymentViewSet),
<|reserved_special_token_1|>
""".. Ignore pydocstyle D400."""
from rolca.payment.api.views import (
PaymentViewSet,
)
routeList = ((r'payment', PaymentViewSet),)
|
flexible
|
{
"blob_id": "2bfdc259bcd5ff058ee8661a14afd8a915b8372b",
"index": 7020,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrouteList = ('payment', PaymentViewSet),\n",
"step-3": "<mask token>\nfrom rolca.payment.api.views import PaymentViewSet\nrouteList = ('payment', PaymentViewSet),\n",
"step-4": "\"\"\".. Ignore pydocstyle D400.\"\"\"\nfrom rolca.payment.api.views import (\n PaymentViewSet,\n)\n\nrouteList = ((r'payment', PaymentViewSet),)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@admin.register(Advert)
class AdminAdvert(admin.ModelAdmin):
<|reserved_special_token_0|>
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
pass
@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@admin.register(Advert)
class AdminAdvert(admin.ModelAdmin):
filter_horizontal = 'categories',
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
pass
@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
pass
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Advert, Category, ImageAd
@admin.register(Advert)
class AdminAdvert(admin.ModelAdmin):
filter_horizontal = 'categories',
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
pass
@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
pass
<|reserved_special_token_1|>
from django.contrib import admin
from .models import Advert, Category, ImageAd
@admin.register(Advert)
class AdminAdvert(admin.ModelAdmin):
filter_horizontal = "categories",
@admin.register(Category)
class AdminCategory(admin.ModelAdmin):
pass
@admin.register(ImageAd)
class AdminImageAd(admin.ModelAdmin):
pass
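# Editor's note (hedged): filter_horizontal only applies to ManyToManyField, so the
# Advert model is assumed to declare something like:
#   categories = models.ManyToManyField(Category)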
|
flexible
|
{
"blob_id": "fdcee5b3f6b3ec170c9ef3017e0cc6c4b28cf22d",
"index": 454,
"step-1": "<mask token>\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-2": "<mask token>\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n <mask token>\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-3": "<mask token>\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = 'categories',\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-4": "from django.contrib import admin\nfrom .models import Advert, Category, ImageAd\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = 'categories',\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-5": "from django.contrib import admin\nfrom .models import Advert, Category, ImageAd\n\n\[email protected](Advert)\nclass AdminAdvert(admin.ModelAdmin):\n filter_horizontal = \"categories\",\n\n\n\[email protected](Category)\nclass AdminCategory(admin.ModelAdmin):\n pass\n\n\[email protected](ImageAd)\nclass AdminImageAd(admin.ModelAdmin):\n pass\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class GpuThread(threading.Thread):
<|reserved_special_token_0|>
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING US TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection('./model/det.ckpt', self.index)
self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)
l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))
l_u.encoding = 'latin1'
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))
r_u.encoding = 'latin1'
self.right_gmm = r_u.load()
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING US TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection('./model/det.ckpt', self.index)
self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)
l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))
l_u.encoding = 'latin1'
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))
r_u.encoding = 'latin1'
self.right_gmm = r_u.load()
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING US TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
@func_set_timeout(5)
def lung_lobe(self, nodule_df, mask):
nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values
lungs = []
lobes = []
lobel_info = []
for nodule in nodule_df_values:
lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.
right_gmm)
lungs.append(lung)
lobes.append(lobe)
lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else
''))
nodule_df['lung'] = lungs
nodule_df['lobe'] = lobes
nodule_df['lobel_info'] = lobel_info
return nodule_df
<|reserved_special_token_1|>
import threading
import time
import pickle
from utils.ret_utils import error_info
from nodule_class.isnodule import LungIsncls
from preprocessing.location import lobe_locate_gmm
from detection.lung_detection import LungDetection
from func_timeout import FunctionTimedOut
from func_timeout import func_set_timeout
import weakref
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection('./model/det.ckpt', self.index)
self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)
l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))
l_u.encoding = 'latin1'
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))
r_u.encoding = 'latin1'
self.right_gmm = r_u.load()
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING US TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
@func_set_timeout(5)
def lung_lobe(self, nodule_df, mask):
nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values
lungs = []
lobes = []
lobel_info = []
for nodule in nodule_df_values:
lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.
right_gmm)
lungs.append(lung)
lobes.append(lobe)
lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else
''))
nodule_df['lung'] = lungs
nodule_df['lobe'] = lobes
nodule_df['lobel_info'] = lobel_info
return nodule_df
<|reserved_special_token_1|>
import threading
import time
# import numpy as np
import pickle
from utils.ret_utils import error_info
from nodule_class.isnodule import LungIsncls
from preprocessing.location import lobe_locate_gmm
from detection.lung_detection import LungDetection
from func_timeout import FunctionTimedOut
from func_timeout import func_set_timeout
import weakref
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection("./model/det.ckpt", self.index)
# is nodule cls
self.lung_isnc = LungIsncls("./model/isn.ckpt", self.index)
l_u = pickle._Unpickler(open("./model/left_gmm.pkl", "rb"))
l_u.encoding = "latin1"
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open("./model/right_gmm.pkl", "rb"))
r_u.encoding = "latin1"
self.right_gmm = r_u.load()
# cudnn.benchmark = True
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(
time.ctime(),
" ",
result_dict["json_id"],
" Using GPU Device ",
self.index,
)
t_s = time.time()
nodule_df = self.lung_dete.prediction(
result_dict["prep_data"],
result_dict["prep_spac"],
result_dict["prep_ebox"],
result_dict["prep_mask"],
)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING USE TIME(lung dete prediction):",
time.time() - t_s,
)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(
nodule_df, result_dict["prep_case"], result_dict["prep_spac"]
)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING USE TIME(lung isnc nodule cls):",
time.time() - t_s,
)
# preb = lung_isnc.nodule_cls(nodule_df, result_dict['prep_case'], result_dict['prep_spac'])
# del lung_isnc
t_s = time.time()
preb = self.lung_lobe(preb, result_dict["prep_mask"])
result_dict["nodule_preb"] = preb
self.que_ret.put(result_dict, timeout=2)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING US TIME(lung lobe):",
time.time() - t_s,
)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict["json_id"], "GPU FUN TIMEOUT ")
except Exception as e:
if result_dict and "json_id" in result_dict.keys():
print(
time.ctime()
+ "GPU ERROR : {} {}".format(e, result_dict["json_id"])
)
error_info(200, result_dict)
else:
print(time.ctime() + "GPU ERROR : {}".format(e))
@func_set_timeout(5)
def lung_lobe(self, nodule_df, mask):
nodule_df_values = nodule_df[["coordX", "coordY", "coordZ"]].values
lungs = []
lobes = []
lobel_info = []
for nodule in nodule_df_values:
lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.right_gmm)
lungs.append(lung)
lobes.append(lobe)
lobel_info.append(lung + "肺" + (lobe + "叶" if not lobe == "" else ""))
nodule_df["lung"] = lungs
nodule_df["lobe"] = lobes
nodule_df["lobel_info"] = lobel_info
return nodule_df
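# Editor's note (hedged sketch): how this worker is typically wired -- the queue
# objects, GPU index and the producer/consumer stages below are assumptions, not
# part of the original module:
#   import queue
#   que_det, que_ret = queue.Queue(), queue.Queue()
#   worker = GpuThread(que_det, que_ret, index=0)
#   worker.daemon = True
#   worker.start()
#   que_det.put(result_dict)   # filled by the preprocessing stage
#   result = que_ret.get()     # consumed by the post-processing / report stage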
|
flexible
|
{
"blob_id": "8035f195cd01dc50691cd93ea91a6377b1d83f24",
"index": 1166,
"step-1": "<mask token>\n\n\nclass GpuThread(threading.Thread):\n <mask token>\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GpuThread(threading.Thread):\n\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n self.index = index\n self.lung_dete = LungDetection('./model/det.ckpt', self.index)\n self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)\n l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))\n l_u.encoding = 'latin1'\n self.left_gmm = l_u.load()\n r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))\n r_u.encoding = 'latin1'\n self.right_gmm = r_u.load()\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass GpuThread(threading.Thread):\n\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n self.index = index\n self.lung_dete = LungDetection('./model/det.ckpt', self.index)\n self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)\n l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))\n l_u.encoding = 'latin1'\n self.left_gmm = l_u.load()\n r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))\n r_u.encoding = 'latin1'\n self.right_gmm = r_u.load()\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n\n @func_set_timeout(5)\n def lung_lobe(self, nodule_df, mask):\n nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values\n lungs = []\n lobes = []\n lobel_info = []\n for nodule in nodule_df_values:\n lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.\n right_gmm)\n lungs.append(lung)\n lobes.append(lobe)\n lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else\n ''))\n nodule_df['lung'] = lungs\n nodule_df['lobe'] = lobes\n nodule_df['lobel_info'] = lobel_info\n return nodule_df\n",
"step-4": "import threading\nimport time\nimport pickle\nfrom utils.ret_utils import error_info\nfrom nodule_class.isnodule import LungIsncls\nfrom preprocessing.location import lobe_locate_gmm\nfrom detection.lung_detection import LungDetection\nfrom func_timeout import FunctionTimedOut\nfrom func_timeout import func_set_timeout\nimport weakref\n\n\nclass GpuThread(threading.Thread):\n\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n self.index = index\n self.lung_dete = LungDetection('./model/det.ckpt', self.index)\n self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)\n l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))\n l_u.encoding = 'latin1'\n self.left_gmm = l_u.load()\n r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))\n r_u.encoding = 'latin1'\n self.right_gmm = r_u.load()\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(time.ctime(), ' ', result_dict['json_id'],\n ' Using GPU Device ', self.index)\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(result_dict[\n 'prep_data'], result_dict['prep_spac'], result_dict[\n 'prep_ebox'], result_dict['prep_mask'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung dete prediction):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[\n 'prep_case'], result_dict['prep_spac'])\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING USE TIME(lung isnc nodule cls):', time.time(\n ) - t_s)\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict['prep_mask'])\n result_dict['nodule_preb'] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(time.ctime(), ' ', result_dict['json_id'],\n 'GPU DOING US TIME(lung lobe):', time.time() - t_s)\n i += 1\n del result_dict, nodule_df, preb\n except FunctionTimedOut:\n print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')\n except Exception as e:\n if result_dict and 'json_id' in result_dict.keys():\n print(time.ctime() + 'GPU ERROR : {} {}'.format(e,\n result_dict['json_id']))\n error_info(200, result_dict)\n else:\n print(time.ctime() + 'GPU ERROR : {}'.format(e))\n\n @func_set_timeout(5)\n def lung_lobe(self, nodule_df, mask):\n nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values\n lungs = []\n lobes = []\n lobel_info = []\n for nodule in nodule_df_values:\n lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.\n right_gmm)\n lungs.append(lung)\n lobes.append(lobe)\n lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else\n ''))\n nodule_df['lung'] = lungs\n nodule_df['lobe'] = lobes\n nodule_df['lobel_info'] = lobel_info\n return nodule_df\n",
"step-5": "import threading\nimport time\n\n# import numpy as np\nimport pickle\nfrom utils.ret_utils import error_info\nfrom nodule_class.isnodule import LungIsncls\nfrom preprocessing.location import lobe_locate_gmm\nfrom detection.lung_detection import LungDetection\nfrom func_timeout import FunctionTimedOut\nfrom func_timeout import func_set_timeout\nimport weakref\n\n\nclass GpuThread(threading.Thread):\n def __init__(self, que_det, que_ret, index):\n threading.Thread.__init__(self)\n self.que_det = que_det\n self.que_ret = que_ret\n\n self.index = index\n self.lung_dete = LungDetection(\"./model/det.ckpt\", self.index)\n # is nodule cls\n self.lung_isnc = LungIsncls(\"./model/isn.ckpt\", self.index)\n\n l_u = pickle._Unpickler(open(\"./model/left_gmm.pkl\", \"rb\"))\n l_u.encoding = \"latin1\"\n self.left_gmm = l_u.load()\n\n r_u = pickle._Unpickler(open(\"./model/right_gmm.pkl\", \"rb\"))\n r_u.encoding = \"latin1\"\n self.right_gmm = r_u.load()\n # cudnn.benchmark = True\n\n def run(self):\n i = 0\n while True:\n result_dict = self.que_det.get(block=True)\n try:\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \" Using GPU Device \",\n self.index,\n )\n t_s = time.time()\n nodule_df = self.lung_dete.prediction(\n result_dict[\"prep_data\"],\n result_dict[\"prep_spac\"],\n result_dict[\"prep_ebox\"],\n result_dict[\"prep_mask\"],\n )\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \"GPU DOING USE TIME(lung dete prediction):\",\n time.time() - t_s,\n )\n t_s = time.time()\n preb = self.lung_isnc.nodule_cls(\n nodule_df, result_dict[\"prep_case\"], result_dict[\"prep_spac\"]\n )\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \"GPU DOING USE TIME(lung isnc nodule cls):\",\n time.time() - t_s,\n )\n # preb = lung_isnc.nodule_cls(nodule_df, result_dict['prep_case'], result_dict['prep_spac'])\n # del lung_isnc\n t_s = time.time()\n preb = self.lung_lobe(preb, result_dict[\"prep_mask\"])\n result_dict[\"nodule_preb\"] = preb\n self.que_ret.put(result_dict, timeout=2)\n print(\n time.ctime(),\n \" \",\n result_dict[\"json_id\"],\n \"GPU DOING US TIME(lung lobe):\",\n time.time() - t_s,\n )\n i += 1\n del result_dict, nodule_df, preb\n\n except FunctionTimedOut:\n print(time.ctime(), result_dict[\"json_id\"], \"GPU FUN TIMEOUT \")\n except Exception as e:\n if result_dict and \"json_id\" in result_dict.keys():\n print(\n time.ctime()\n + \"GPU ERROR : {} {}\".format(e, result_dict[\"json_id\"])\n )\n error_info(200, result_dict)\n else:\n print(time.ctime() + \"GPU ERROR : {}\".format(e))\n\n @func_set_timeout(5)\n def lung_lobe(self, nodule_df, mask):\n nodule_df_values = nodule_df[[\"coordX\", \"coordY\", \"coordZ\"]].values\n lungs = []\n lobes = []\n\n lobel_info = []\n for nodule in nodule_df_values:\n lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.right_gmm)\n lungs.append(lung)\n lobes.append(lobe)\n lobel_info.append(lung + \"肺\" + (lobe + \"叶\" if not lobe == \"\" else \"\"))\n nodule_df[\"lung\"] = lungs\n nodule_df[\"lobe\"] = lobes\n\n nodule_df[\"lobel_info\"] = lobel_info\n return nodule_df\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class fwrapper:
def __init__(self, function, childcount, name):
self.function = function
self.childcount = childcount
self.name = name
<|reserved_special_token_0|>
class node:
def __init__(self, fw, children):
self.function = fw.function
self.name = fw.name
self.children = children
def evaluate(self, inp):
results = [n.evaluate(inp) for n in self.children]
return self.function(results)
def display(self, indent=0):
print(' ' * indent + self.name)
for c in self.children:
c.display(indent + 1)
<|reserved_special_token_0|>
class paramnode:
def __init__(self, idx):
self.idx = idx
def evaluate(self, inp):
return inp[self.idx]
def display(self, indent=0):
print('%sp%d' % (' ' * indent, self.idx))
<|reserved_special_token_0|>
class constnode:
def __init__(self, v):
self.v = v
def evaluate(self, inp):
return self.v
def display(self, indent=0):
print('%s%d' % (' ' * indent, self.v))
<|reserved_special_token_0|>
def iffunc(l):
if l[0] > 0:
return l[1]
else:
return l[2]
<|reserved_special_token_0|>
def buildhiddenset():
rows = []
for i in range(200):
x = randint(0, 40)
y = randint(0, 40)
rows.append([x, y, hiddenfunction(x, y)])
return rows
<|reserved_special_token_0|>
def scorefunction(tree, s):
dif = 0
for data in s:
v = tree.evaluate([data[0], data[1]])
dif += abs(v - data[2])
return dif
<|reserved_special_token_0|>
def getrankfunction(dataset):
def rankfunction(population):
scores = [(scorefunction(t, dataset), t) for t in population]
scores.sort()
return scores
return rankfunction
<|reserved_special_token_0|>
def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,
breedingrate=0.4, pexp=0.7, pnew=0.05):
"""Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,
вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.
Наилучшая программа автоматически попадает в следующее поколение без изменения.
Args:
rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.
mutationrate: Вероятность мутации, передаваемая функции mutate.
breedingrate: Вероятность скрещивания, передаваемая функции crossover.
popsize: Размер исходной популяции.
probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/
probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.
Returns:
tuple: Найденное наилучшее совпадние
"""
def selectindex():
return int(log(random()) / log(pexp))
population = [makerandomtree(pc) for i in range(popsize)]
for i in range(maxgen):
scores = rankfunction(population)
print(scores[0][0])
if scores[0][0] == 0:
break
newpop = [scores[0][1], scores[1][1]]
while len(newpop) < popsize:
if random() > pnew:
newpop.append(mutate(crossover(scores[selectindex()][1],
scores[selectindex()][1], probswap=breedingrate), pc,
probchange=mutationrate))
else:
newpop.append(makerandomtree(pc))
population = newpop
scores[0][1].display()
return scores[0][1]
<|reserved_special_token_0|>
class humanplayer:
def evaluate(self, board):
me = tuple(board[0:2])
others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
for i in range(4):
for j in range(4):
if (i, j) == me:
print('O', end=' ')
elif (i, j) in others:
print('X', end=' ')
else:
print('.', end=' ')
print()
print('Your last move was %d' % board[len(board) - 1])
print(' 0')
print('2 3')
print(' 1')
print('Enter move: ')
move = int(input())
return move
class fwrapper:
def __init__(self, function, params, name):
self.function = function
self.childcount = params
self.name = name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class fwrapper:
def __init__(self, function, childcount, name):
self.function = function
self.childcount = childcount
self.name = name
<|reserved_special_token_0|>
class node:
def __init__(self, fw, children):
self.function = fw.function
self.name = fw.name
self.children = children
def evaluate(self, inp):
results = [n.evaluate(inp) for n in self.children]
return self.function(results)
def display(self, indent=0):
print(' ' * indent + self.name)
for c in self.children:
c.display(indent + 1)
<|reserved_special_token_0|>
class paramnode:
def __init__(self, idx):
self.idx = idx
def evaluate(self, inp):
return inp[self.idx]
def display(self, indent=0):
print('%sp%d' % (' ' * indent, self.idx))
<|reserved_special_token_0|>
class constnode:
def __init__(self, v):
self.v = v
def evaluate(self, inp):
return self.v
def display(self, indent=0):
print('%s%d' % (' ' * indent, self.v))
<|reserved_special_token_0|>
def iffunc(l):
if l[0] > 0:
return l[1]
else:
return l[2]
<|reserved_special_token_0|>
def buildhiddenset():
rows = []
for i in range(200):
x = randint(0, 40)
y = randint(0, 40)
rows.append([x, y, hiddenfunction(x, y)])
return rows
<|reserved_special_token_0|>
def scorefunction(tree, s):
dif = 0
for data in s:
v = tree.evaluate([data[0], data[1]])
dif += abs(v - data[2])
return dif
<|reserved_special_token_0|>
def mutate(t, pc, probchange=0.1):
if random() < probchange:
return makerandomtree(pc)
else:
result = deepcopy(t)
if hasattr(t, 'children'):
result.children = [mutate(c, pc, probchange) for c in t.children]
return result
<|reserved_special_token_0|>
def getrankfunction(dataset):
def rankfunction(population):
scores = [(scorefunction(t, dataset), t) for t in population]
scores.sort()
return scores
return rankfunction
<|reserved_special_token_0|>
def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,
breedingrate=0.4, pexp=0.7, pnew=0.05):
"""Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,
вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.
Наилучшая программа автоматически попадает в следующее поколение без изменения.
Args:
rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.
mutationrate: Вероятность мутации, передаваемая функции mutate.
breedingrate: Вероятность скрещивания, передаваемая функции crossover.
popsize: Размер исходной популяции.
probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/
probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.
Returns:
tuple: Найденное наилучшее совпадние
"""
def selectindex():
return int(log(random()) / log(pexp))
population = [makerandomtree(pc) for i in range(popsize)]
for i in range(maxgen):
scores = rankfunction(population)
print(scores[0][0])
if scores[0][0] == 0:
break
newpop = [scores[0][1], scores[1][1]]
while len(newpop) < popsize:
if random() > pnew:
newpop.append(mutate(crossover(scores[selectindex()][1],
scores[selectindex()][1], probswap=breedingrate), pc,
probchange=mutationrate))
else:
newpop.append(makerandomtree(pc))
population = newpop
scores[0][1].display()
return scores[0][1]
<|reserved_special_token_0|>
def tournament(pl):
losses = [(0) for p in pl]
for i in range(len(pl)):
for j in range(len(pl)):
if i == j:
continue
winner = gridgame([pl[i], pl[j]])
if winner == 0:
losses[j] += 2
elif winner == 1:
losses[i] += 2
elif winner == -1:
                losses[i] += 1
                losses[j] += 1  # a drawn game costs both players one point
pass
z = list(zip(losses, pl))
z.sort(key=lambda t: t[0])
print(z[0][1].display(indent=4))
return z
class humanplayer:
def evaluate(self, board):
me = tuple(board[0:2])
others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
for i in range(4):
for j in range(4):
if (i, j) == me:
print('O', end=' ')
elif (i, j) in others:
print('X', end=' ')
else:
print('.', end=' ')
print()
print('Your last move was %d' % board[len(board) - 1])
print(' 0')
print('2 3')
print(' 1')
print('Enter move: ')
move = int(input())
return move
class fwrapper:
def __init__(self, function, params, name):
self.function = function
self.childcount = params
self.name = name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class fwrapper:
def __init__(self, function, childcount, name):
self.function = function
self.childcount = childcount
self.name = name
<|reserved_special_token_0|>
class node:
def __init__(self, fw, children):
self.function = fw.function
self.name = fw.name
self.children = children
def evaluate(self, inp):
results = [n.evaluate(inp) for n in self.children]
return self.function(results)
def display(self, indent=0):
print(' ' * indent + self.name)
for c in self.children:
c.display(indent + 1)
<|reserved_special_token_0|>
class paramnode:
def __init__(self, idx):
self.idx = idx
def evaluate(self, inp):
return inp[self.idx]
def display(self, indent=0):
print('%sp%d' % (' ' * indent, self.idx))
<|reserved_special_token_0|>
class constnode:
def __init__(self, v):
self.v = v
def evaluate(self, inp):
return self.v
def display(self, indent=0):
print('%s%d' % (' ' * indent, self.v))
<|reserved_special_token_0|>
def iffunc(l):
if l[0] > 0:
return l[1]
else:
return l[2]
<|reserved_special_token_0|>
def isgreater(l):
if l[0] > l[1]:
return 1
else:
return 0
<|reserved_special_token_0|>
def exampletree():
return node(ifw, [node(gtw, [paramnode(0), constnode(3)]), node(addw, [
paramnode(1), constnode(5)]), node(subw, [paramnode(1), constnode(2)])]
)
<|reserved_special_token_0|>
def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
if random() < fpr and maxdepth > 0:
f = choice(flist)
children = [makerandomtree(pc, maxdepth - 1, fpr, ppr) for i in
range(f.childcount)]
return node(f, children)
elif random() < ppr:
return paramnode(randint(0, pc - 1))
else:
return constnode(randint(0, 10))
<|reserved_special_token_0|>
def buildhiddenset():
rows = []
for i in range(200):
x = randint(0, 40)
y = randint(0, 40)
rows.append([x, y, hiddenfunction(x, y)])
return rows
<|reserved_special_token_0|>
def scorefunction(tree, s):
dif = 0
for data in s:
v = tree.evaluate([data[0], data[1]])
dif += abs(v - data[2])
return dif
<|reserved_special_token_0|>
def mutate(t, pc, probchange=0.1):
if random() < probchange:
return makerandomtree(pc)
else:
result = deepcopy(t)
if hasattr(t, 'children'):
result.children = [mutate(c, pc, probchange) for c in t.children]
return result
<|reserved_special_token_0|>
def getrankfunction(dataset):
def rankfunction(population):
scores = [(scorefunction(t, dataset), t) for t in population]
scores.sort()
return scores
return rankfunction
<|reserved_special_token_0|>
def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,
breedingrate=0.4, pexp=0.7, pnew=0.05):
"""Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,
вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.
Наилучшая программа автоматически попадает в следующее поколение без изменения.
Args:
rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.
mutationrate: Вероятность мутации, передаваемая функции mutate.
breedingrate: Вероятность скрещивания, передаваемая функции crossover.
popsize: Размер исходной популяции.
probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/
probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.
Returns:
tuple: Найденное наилучшее совпадние
"""
def selectindex():
return int(log(random()) / log(pexp))
population = [makerandomtree(pc) for i in range(popsize)]
for i in range(maxgen):
scores = rankfunction(population)
print(scores[0][0])
if scores[0][0] == 0:
break
newpop = [scores[0][1], scores[1][1]]
while len(newpop) < popsize:
if random() > pnew:
newpop.append(mutate(crossover(scores[selectindex()][1],
scores[selectindex()][1], probswap=breedingrate), pc,
probchange=mutationrate))
else:
newpop.append(makerandomtree(pc))
population = newpop
scores[0][1].display()
return scores[0][1]
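# Editor's note (hedged): a typical driver for the symbolic-regression example above;
# the hyperparameter values are illustrative, not prescribed by this file:
#   rf = getrankfunction(buildhiddenset())
#   best = evolve(2, 500, rf, mutationrate=0.2, breedingrate=0.1, pexp=0.7, pnew=0.1)
#   best.evaluate([7, 3])   # should be close to hiddenfunction(7, 3)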
<|reserved_special_token_0|>
def tournament(pl):
losses = [(0) for p in pl]
for i in range(len(pl)):
for j in range(len(pl)):
if i == j:
continue
winner = gridgame([pl[i], pl[j]])
if winner == 0:
losses[j] += 2
elif winner == 1:
losses[i] += 2
elif winner == -1:
losses[i] += 1
                losses[j] += 1
pass
z = list(zip(losses, pl))
z.sort(key=lambda t: t[0])
print(z[0][1].display(indent=4))
return z
class humanplayer:
def evaluate(self, board):
me = tuple(board[0:2])
others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
for i in range(4):
for j in range(4):
if (i, j) == me:
print('O', end=' ')
elif (i, j) in others:
print('X', end=' ')
else:
print('.', end=' ')
print()
print('Your last move was %d' % board[len(board) - 1])
print(' 0')
print('2 3')
print(' 1')
print('Enter move: ')
move = int(input())
return move
class fwrapper:
def __init__(self, function, params, name):
self.function = function
self.childcount = params
self.name = name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class fwrapper:
def __init__(self, function, childcount, name):
self.function = function
self.childcount = childcount
self.name = name
<|reserved_special_token_0|>
class node:
def __init__(self, fw, children):
self.function = fw.function
self.name = fw.name
self.children = children
def evaluate(self, inp):
results = [n.evaluate(inp) for n in self.children]
return self.function(results)
def display(self, indent=0):
print(' ' * indent + self.name)
for c in self.children:
c.display(indent + 1)
<|reserved_special_token_0|>
class paramnode:
def __init__(self, idx):
self.idx = idx
def evaluate(self, inp):
return inp[self.idx]
def display(self, indent=0):
print('%sp%d' % (' ' * indent, self.idx))
<|reserved_special_token_0|>
class constnode:
def __init__(self, v):
self.v = v
def evaluate(self, inp):
return self.v
def display(self, indent=0):
print('%s%d' % (' ' * indent, self.v))
<|reserved_special_token_0|>
def iffunc(l):
if l[0] > 0:
return l[1]
else:
return l[2]
<|reserved_special_token_0|>
def isgreater(l):
if l[0] > l[1]:
return 1
else:
return 0
<|reserved_special_token_0|>
def exampletree():
return node(ifw, [node(gtw, [paramnode(0), constnode(3)]), node(addw, [
paramnode(1), constnode(5)]), node(subw, [paramnode(1), constnode(2)])]
)
<|reserved_special_token_0|>
def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
if random() < fpr and maxdepth > 0:
f = choice(flist)
children = [makerandomtree(pc, maxdepth - 1, fpr, ppr) for i in
range(f.childcount)]
return node(f, children)
elif random() < ppr:
return paramnode(randint(0, pc - 1))
else:
return constnode(randint(0, 10))
def hiddenfunction(x, y):
return x ** 2 + 2 * y + 3 * x + 5
def buildhiddenset():
rows = []
for i in range(200):
x = randint(0, 40)
y = randint(0, 40)
rows.append([x, y, hiddenfunction(x, y)])
return rows
<|reserved_special_token_0|>
def scorefunction(tree, s):
dif = 0
for data in s:
v = tree.evaluate([data[0], data[1]])
dif += abs(v - data[2])
return dif
<|reserved_special_token_0|>
def mutate(t, pc, probchange=0.1):
if random() < probchange:
return makerandomtree(pc)
else:
result = deepcopy(t)
if hasattr(t, 'children'):
result.children = [mutate(c, pc, probchange) for c in t.children]
return result
<|reserved_special_token_0|>
def crossover(t1, t2, probswap=0.7, top=1):
if random() < probswap and not top:
return deepcopy(t2)
else:
result = deepcopy(t1)
if hasattr(t1, 'children') and hasattr(t2, 'children'):
result.children = [crossover(c, choice(t2.children), probswap,
0) for c in t1.children]
return result
def getrankfunction(dataset):
def rankfunction(population):
scores = [(scorefunction(t, dataset), t) for t in population]
scores.sort()
return scores
return rankfunction
<|reserved_special_token_0|>
def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,
breedingrate=0.4, pexp=0.7, pnew=0.05):
"""Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,
вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.
Наилучшая программа автоматически попадает в следующее поколение без изменения.
Args:
rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.
mutationrate: Вероятность мутации, передаваемая функции mutate.
breedingrate: Вероятность скрещивания, передаваемая функции crossover.
popsize: Размер исходной популяции.
probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/
probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.
Returns:
tuple: Найденное наилучшее совпадние
"""
def selectindex():
return int(log(random()) / log(pexp))
population = [makerandomtree(pc) for i in range(popsize)]
for i in range(maxgen):
scores = rankfunction(population)
print(scores[0][0])
if scores[0][0] == 0:
break
newpop = [scores[0][1], scores[1][1]]
while len(newpop) < popsize:
if random() > pnew:
newpop.append(mutate(crossover(scores[selectindex()][1],
scores[selectindex()][1], probswap=breedingrate), pc,
probchange=mutationrate))
else:
newpop.append(makerandomtree(pc))
population = newpop
scores[0][1].display()
return scores[0][1]
def gridgame(p):
max = 3, 3
lastmove = [-1, -1]
location = [[randint(0, max[0]), randint(0, max[1])]]
location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])
for o in range(50):
for i in range(2):
locs = location[i][:] + location[1 - i][:]
locs.append(lastmove[i])
move = p[i].evaluate(locs) % 4
if lastmove[i] == move:
return 1 - i
lastmove[i] = move
if move == 0:
location[i][0] -= 1
if location[i][0] < 0:
location[i][0] = 0
if move == 1:
location[i][0] += 1
if location[i][0] > max[0]:
location[i][0] = max[0]
if move == 2:
location[i][1] -= 1
if location[i][1] < 0:
location[i][1] = 0
if move == 3:
location[i][1] += 1
if location[i][1] > max[1]:
location[i][1] = max[1]
if location[i] == location[1 - i]:
return i
return -1
def tournament(pl):
losses = [(0) for p in pl]
for i in range(len(pl)):
for j in range(len(pl)):
if i == j:
continue
winner = gridgame([pl[i], pl[j]])
if winner == 0:
losses[j] += 2
elif winner == 1:
losses[i] += 2
elif winner == -1:
losses[i] += 1
                losses[j] += 1
pass
z = list(zip(losses, pl))
z.sort(key=lambda t: t[0])
print(z[0][1].display(indent=4))
return z
class humanplayer:
def evaluate(self, board):
me = tuple(board[0:2])
others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
for i in range(4):
for j in range(4):
if (i, j) == me:
print('O', end=' ')
elif (i, j) in others:
print('X', end=' ')
else:
print('.', end=' ')
print()
print('Your last move was %d' % board[len(board) - 1])
print(' 0')
print('2 3')
print(' 1')
print('Enter move: ')
move = int(input())
return move
class fwrapper:
def __init__(self, function, params, name):
self.function = function
self.childcount = params
self.name = name
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from random import random, randint, choice
from copy import deepcopy
from math import log
"""
Обертка для функций, которые будут находиться в узлах,
представляющих функции. Его члены – имя функции, сама функция
и количество принимаемых параметров.
"""
class fwrapper:
def __init__(self, function, childcount, name):
self.function = function
self.childcount = childcount
self.name = name
"""
Класс функциональных узлов (имеющих потомков). Инициализируется экземпляром класса fwrapper.
Метод evaluate вычисляет значения дочерних узлов и передает их представленной данным узлом
функции в качестве параметров.
"""
class node:
def __init__(self, fw, children):
self.function = fw.function
self.name = fw.name
self.children = children
def evaluate(self, inp):
results = [n.evaluate(inp) for n in self.children]
return self.function(results)
    # The display method prints a string representation of the tree
def display(self, indent=0):
print((' ' * indent) + self.name)
for c in self.children:
c.display(indent + 1)
"""
Класс узлов, которые просто возвращают один из переданных программе параметров.
Его метод evaluate возвращает параметр, соответствующий значению idx.
"""
class paramnode:
def __init__(self, idx):
self.idx = idx
def evaluate(self, inp):
return inp[self.idx]
    # This method simply prints the index of the parameter being returned
def display(self, indent=0):
print('%sp%d' % (' ' * indent, self.idx))
"""
Узлы, возвращающие константы. Метод evaluate просто возвращает
то значение, которым экземпляр был инициализирован.
"""
class constnode:
def __init__(self, v):
self.v = v
def evaluate(self, inp):
return self.v
def display(self, indent=0):
print('%s%d' % (' ' * indent, self.v))
"""
Простые функции типа add и subtract можно встроить с помощью лямбда-выражений.
Для остальных функцию придется написать в отдельном блоке.
В любом случае функция обертывается в экземпляр класса fwrapper
вместе со своим именем и числом параметров.
"""
addw = fwrapper(lambda l: l[0] + l[1], 2, 'add')
subw = fwrapper(lambda l: l[0] - l[1], 2, 'subtract')
mulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')
def iffunc(l):
if l[0] > 0:
return l[1]
else:
return l[2]
ifw = fwrapper(iffunc, 3, 'if')
def isgreater(l):
if l[0] > l[1]:
return 1
else:
return 0
gtw = fwrapper(isgreater, 2, 'isgreater')
# This line builds a list of all the functions so that elements
# can later be picked from it at random.
flist = [addw, mulw, ifw, gtw, subw]
# Using the node class, an example program tree can be built by hand
def exampletree():
return node(ifw, [
node(gtw, [paramnode(0), constnode(3)]),
node(addw, [paramnode(1), constnode(5)]),
node(subw, [paramnode(1), constnode(2)]),
]
)
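# Usage sketch: evaluating the example program for two inputs. With p0=2 the test
# 2 > 3 fails, so the else branch p1 - 2 = 1 is returned; with p0=5 the test passes
# and p1 + 5 = 8 is returned.
def exampletree_demo():
    program = exampletree()
    program.display()
    print(program.evaluate([2, 3]))  # 1
    print(program.evaluate([5, 3]))  # 8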
"""
Эта функция создает узел, содержащий случайно выбранную функцию, и проверяет,
сколько у этой функции должно быть параметров. Для каждого дочернего узла функция
вызывает себя рекурсивно, чтобы создать новый узел. Так конструируется все дерево,
причем процесс построения ветвей завершается в тот момент, когда у очередного узла
нет дочерних (то есть он представляет либо константу, либо переменную-параметр).
Параметр pc равен числу параметров, принимаемых деревом на входе. Параметр fpr
задает вероятность того, что вновь создаваемый узел будет соответствовать функции,
а ppr – вероятность того, что узел, не являющийся функцией, будет иметь тип paramnode.
"""
def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
if random() < fpr and maxdepth > 0:
f = choice(flist)
children = [makerandomtree(pc, maxdepth - 1, fpr, ppr)
for i in range(f.childcount)]
return node(f, children)
elif random() < ppr:
return paramnode(randint(0, pc - 1))
else:
return constnode(randint(0, 10))
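# Usage sketch: build a random program that takes two parameters, print its tree
# and evaluate it on one input pair. Every run produces a different program.
def makerandomtree_demo():
    random_program = makerandomtree(2)
    random_program.display()
    print(random_program.evaluate([7, 1]))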
def hiddenfunction(x, y):
return x ** 2 + 2 * y + 3 * x + 5
def buildhiddenset():
rows = []
for i in range(200):
x = randint(0, 40)
y = randint(0, 40)
rows.append([x, y, hiddenfunction(x, y)])
return rows
"""
Эта функция перебирает все строки набора данных, вычисляет функцию от указанных
в ней аргументов и сравнивает с результатом. Абсолютные значения разностей суммируются.
Чем меньше сумма, тем лучше программа, а значение 0 говорит о том, что все результаты
в точности совпали.
"""
def scorefunction(tree, s):
dif = 0
for data in s:
v = tree.evaluate([data[0], data[1]])
dif += abs(v - data[2])
return dif
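# Usage sketch: score a random program against the hidden dataset. The sum of
# absolute errors is returned, so lower is better and 0 means a perfect fit.
def scorefunction_demo():
    hiddenset = buildhiddenset()
    print(scorefunction(makerandomtree(2), hiddenset))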
"""
Эта функция начинает с корня дерева и решает, следует ли изменить
узел. Если нет, она рекурсивно вызывает mutate для дочерних узлов.
Может случиться, что мутации подвергнутся все узлы, а иногда дерево
вообще не изменится.
"""
# Мутация путем замены поддерева
def mutate(t, pc, probchange=0.1):
if random() < probchange:
return makerandomtree(pc)
else:
result = deepcopy(t)
if hasattr(t, "children"):
result.children = [mutate(c, pc, probchange) for c in t.children]
return result
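# Usage sketch: mutate a random program with a high probchange so the effect is
# visible, then print both trees for comparison.
def mutate_demo():
    original = makerandomtree(2)
    mutated = mutate(original, 2, probchange=0.5)
    original.display()
    mutated.display()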
"""
Функции, выполняющей скрещивание, передаются два дерева, и она
обходит оба. Если случайно выбранное число не превышает пороговой
вероятности, то функция возвращает копию первого дерева, в которой
одна из ветвей заменена какой-то ветвью, взятой из второго дерева.
Поскольку обход выполняется параллельно, то скрещивание произойдет примерно на одном уровне каждого дерева.
"""
# Функция скрещивания. Две успешные программы комбинируются с целью получения новой программы.
def crossover(t1, t2, probswap=0.7, top=1):
if random() < probswap and not top:
return deepcopy(t2)
else:
result = deepcopy(t1)
if hasattr(t1, 'children') and hasattr(t2, 'children'):
result.children = [crossover(c, choice(t2.children), probswap, 0)
for c in t1.children]
return result
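# Usage sketch: cross two random parents. The child is a copy of the first parent
# in which one branch has been swapped for a branch of the second parent.
def crossover_demo():
    parent1 = makerandomtree(2)
    parent2 = makerandomtree(2)
    crossover(parent1, parent2).display()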
# Returns a ranking function for the given dataset
def getrankfunction(dataset):
def rankfunction(population):
scores = [(scorefunction(t, dataset), t) for t in population]
scores.sort()
return scores
return rankfunction
"""
Создание конкурентной среды, в которой программы будут эволюционировать.
Смысл в том, чтобы создать набор случайных программ, отобрать из них
наилучшие для копирования и модификации и повторять процесс, пока не будет
выполнено некое условие останова.
"""
def evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1, breedingrate=0.4, pexp=0.7, pnew=0.05):
"""Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,
вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.
Наилучшая программа автоматически попадает в следующее поколение без изменения.
Args:
rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.
mutationrate: Вероятность мутации, передаваемая функции mutate.
breedingrate: Вероятность скрещивания, передаваемая функции crossover.
popsize: Размер исходной популяции.
probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/
probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.
Returns:
tuple: Найденное наилучшее совпадние
"""
# Возвращает случайное число, отдавая предпочтение более маленьким числам.
# Чем меньше значение pexp, тем больше будет доля маленьких чисел.
def selectindex():
return int(log(random()) / log(pexp))
    # Create a random initial population
population = [makerandomtree(pc) for i in range(popsize)]
for i in range(maxgen):
scores = rankfunction(population)
print(scores[0][0])
if scores[0][0] == 0: break
        # The two best individuals are always selected
newpop = [scores[0][1], scores[1][1]]
        # Build the next generation
while len(newpop) < popsize:
if random() > pnew:
newpop.append(mutate(
crossover(scores[selectindex()][1],
scores[selectindex()][1],
probswap=breedingrate),
pc, probchange=mutationrate))
else:
                # Add a random node to introduce uncertainty
newpop.append(makerandomtree(pc))
population = newpop
scores[0][1].display()
return scores[0][1]
#[
# (10, "program1"),
# (17, "program2"),
#]
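# Usage sketch for the regression problem: rank candidate programs against the
# hidden dataset and evolve a population of 500 two-parameter programs. The rate
# values below are illustrative, not the only reasonable choice.
def evolve_regression_demo():
    rf = getrankfunction(buildhiddenset())
    return evolve(2, 500, rf, mutationrate=0.2, breedingrate=0.1, pexp=0.7, pnew=0.1)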
def gridgame(p):
    # Board size
max = (3, 3)
    # Remember each player's last move
lastmove = [-1, -1]
    # Remember the players' positions
location = [[randint(0, max[0]), randint(0, max[1])]]
    # Place the second player far enough away from the first
location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])
    # No more than 50 moves before a draw is declared
for o in range(50):
        # For each player
for i in range(2):
locs = location[i][:] + location[1 - i][:]
locs.append(lastmove[i])
move = p[i].evaluate(locs) % 4
            # If a player moves in the same direction twice in a row,
            # it counts as a loss
if lastmove[i] == move: return 1 - i
lastmove[i] = move
if move == 0:
location[i][0] -= 1
                # The board is bounded
if location[i][0] < 0: location[i][0] = 0
if move == 1:
location[i][0] += 1
if location[i][0] > max[0]: location[i][0] = max[0]
if move == 2:
location[i][1] -= 1
if location[i][1] < 0: location[i][1] = 0
if move == 3:
location[i][1] += 1
if location[i][1] > max[1]: location[i][1] = max[1]
            # If the opponent has been captured, you win
if location[i] == location[1 - i]: return i
return -1
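# Usage sketch: play one match between two random programs. Each player receives
# five inputs (its own x, y, the opponent's x, y and its previous move), so the
# trees must be built with pc=5. The return value is the winner's index, or -1 for a draw.
def gridgame_demo():
    print(gridgame([makerandomtree(5), makerandomtree(5)]))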
def tournament(pl):
    # Array for counting losses
losses = [0 for p in pl]
    # Every player plays every other player
for i in range(len(pl)):
for j in range(len(pl)):
if i == j: continue
            # Who won?
winner = gridgame([pl[i], pl[j]])
            # Two points for a loss, one for a draw
if winner == 0:
losses[j] += 2
elif winner == 1:
losses[i] += 2
elif winner == -1:
losses[i] += 1
                losses[j] += 1
pass
    # Sort and return the results
z = list(zip(losses, pl))
z.sort(key=lambda t: t[0])
# input()
print(z[0][1].display(indent=4))
return z
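# Usage sketch: because tournament takes a population and returns it ranked by
# losses, it can be plugged into evolve as the rankfunction, co-evolving
# grid-game players against each other. Population size and maxgen are illustrative.
def evolve_players_demo():
    return evolve(5, 100, tournament, maxgen=50)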
class humanplayer:
def evaluate(self, board):
        # Get my position and the positions of the other players
me = tuple(board[0:2])
others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
        # Draw the board
for i in range(4):
for j in range(4):
if (i, j) == me:
print('O',end=' ')
elif (i, j) in others:
print('X',end=' ')
else:
print('.',end=' ')
print()
        # Show the moves, for reference
print('Your last move was %d' % board[len(board) - 1])
print(' 0')
print('2 3')
print(' 1')
print('Enter move: ')
        # Return the number entered by the user
move = int(input())
return move
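# Usage sketch: pit an evolved program against a human at the console. gridgame
# expects a list of two players, and humanplayer.evaluate reads each move from input().
def play_against_human_demo(evolved_program):
    return gridgame([evolved_program, humanplayer()])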
class fwrapper:
def __init__(self, function, params, name):
self.function = function
self.childcount = params
self.name = name
# flist={'str':[substringw,concatw],'int':[indexw]}
flist = [addw, mulw, ifw, gtw, subw]
|
flexible
|
{
"blob_id": "89881f3cc6703b3f43f5d2dae87fa943d8a21513",
"index": 5485,
"step-1": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. 
Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\n<mask token>\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef mutate(t, pc, probchange=0.1):\n if random() < probchange:\n return makerandomtree(pc)\n else:\n result = deepcopy(t)\n if hasattr(t, 'children'):\n result.children = [mutate(c, pc, probchange) for c in t.children]\n return result\n\n\n<mask token>\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. 
Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\n<mask token>\n\n\ndef tournament(pl):\n losses = [(0) for p in pl]\n for i in range(len(pl)):\n for j in range(len(pl)):\n if i == j:\n continue\n winner = gridgame([pl[i], pl[j]])\n if winner == 0:\n losses[j] += 2\n elif winner == 1:\n losses[i] += 2\n elif winner == -1:\n losses[i] += 1\n losses[i] += 1\n pass\n z = list(zip(losses, pl))\n z.sort(key=lambda t: t[0])\n print(z[0][1].display(indent=4))\n return z\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef isgreater(l):\n if l[0] > l[1]:\n return 1\n else:\n return 0\n\n\n<mask token>\n\n\ndef exampletree():\n return node(ifw, [node(gtw, [paramnode(0), constnode(3)]), node(addw, [\n paramnode(1), constnode(5)]), node(subw, [paramnode(1), constnode(2)])]\n )\n\n\n<mask token>\n\n\ndef makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):\n if random() < fpr and maxdepth > 0:\n f = choice(flist)\n children = [makerandomtree(pc, maxdepth - 1, fpr, ppr) for i in\n range(f.childcount)]\n return node(f, children)\n elif random() < ppr:\n return paramnode(randint(0, pc - 1))\n else:\n return constnode(randint(0, 10))\n\n\n<mask token>\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef mutate(t, pc, probchange=0.1):\n if random() < probchange:\n return makerandomtree(pc)\n else:\n result = deepcopy(t)\n if hasattr(t, 'children'):\n result.children = [mutate(c, pc, probchange) for c in t.children]\n return result\n\n\n<mask token>\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. 
Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\n<mask token>\n\n\ndef tournament(pl):\n losses = [(0) for p in pl]\n for i in range(len(pl)):\n for j in range(len(pl)):\n if i == j:\n continue\n winner = gridgame([pl[i], pl[j]])\n if winner == 0:\n losses[j] += 2\n elif winner == 1:\n losses[i] += 2\n elif winner == -1:\n losses[i] += 1\n losses[i] += 1\n pass\n z = list(zip(losses, pl))\n z.sort(key=lambda t: t[0])\n print(z[0][1].display(indent=4))\n return z\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass fwrapper:\n\n def __init__(self, function, childcount, name):\n self.function = function\n self.childcount = childcount\n self.name = name\n\n\n<mask token>\n\n\nclass node:\n\n def __init__(self, fw, children):\n self.function = fw.function\n self.name = fw.name\n self.children = children\n\n def evaluate(self, inp):\n results = [n.evaluate(inp) for n in self.children]\n return self.function(results)\n\n def display(self, indent=0):\n print(' ' * indent + self.name)\n for c in self.children:\n c.display(indent + 1)\n\n\n<mask token>\n\n\nclass paramnode:\n\n def __init__(self, idx):\n self.idx = idx\n\n def evaluate(self, inp):\n return inp[self.idx]\n\n def display(self, indent=0):\n print('%sp%d' % (' ' * indent, self.idx))\n\n\n<mask token>\n\n\nclass constnode:\n\n def __init__(self, v):\n self.v = v\n\n def evaluate(self, inp):\n return self.v\n\n def display(self, indent=0):\n print('%s%d' % (' ' * indent, self.v))\n\n\n<mask token>\n\n\ndef iffunc(l):\n if l[0] > 0:\n return l[1]\n else:\n return l[2]\n\n\n<mask token>\n\n\ndef isgreater(l):\n if l[0] > l[1]:\n return 1\n else:\n return 0\n\n\n<mask token>\n\n\ndef exampletree():\n return node(ifw, [node(gtw, [paramnode(0), constnode(3)]), node(addw, [\n paramnode(1), constnode(5)]), node(subw, [paramnode(1), constnode(2)])]\n )\n\n\n<mask token>\n\n\ndef makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):\n if random() < fpr and maxdepth > 0:\n f = choice(flist)\n children = [makerandomtree(pc, maxdepth - 1, fpr, ppr) for i in\n range(f.childcount)]\n return node(f, children)\n elif random() < ppr:\n return paramnode(randint(0, pc - 1))\n else:\n return constnode(randint(0, 10))\n\n\ndef hiddenfunction(x, y):\n return x ** 2 + 2 * y + 3 * x + 5\n\n\ndef buildhiddenset():\n rows = []\n for i in range(200):\n x = randint(0, 40)\n y = randint(0, 40)\n rows.append([x, y, hiddenfunction(x, y)])\n return rows\n\n\n<mask token>\n\n\ndef scorefunction(tree, s):\n dif = 0\n for data in s:\n v = tree.evaluate([data[0], data[1]])\n dif += abs(v - data[2])\n return dif\n\n\n<mask token>\n\n\ndef mutate(t, pc, probchange=0.1):\n if random() < probchange:\n return makerandomtree(pc)\n else:\n result = deepcopy(t)\n if hasattr(t, 'children'):\n result.children = [mutate(c, pc, probchange) for c in t.children]\n return result\n\n\n<mask token>\n\n\ndef crossover(t1, t2, probswap=0.7, top=1):\n if random() < probswap and not top:\n return deepcopy(t2)\n else:\n result = deepcopy(t1)\n if hasattr(t1, 'children') and hasattr(t2, 'children'):\n result.children = [crossover(c, choice(t2.children), probswap, \n 0) for c in t1.children]\n return result\n\n\ndef getrankfunction(dataset):\n\n def rankfunction(population):\n scores = [(scorefunction(t, dataset), t) for t in population]\n scores.sort()\n return scores\n return rankfunction\n\n\n<mask token>\n\n\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1,\n breedingrate=0.4, pexp=0.7, pnew=0.05):\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\n Наилучшая программа автоматически попадает в следующее поколение без изменения. 
\n Args:\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\n mutationrate: Вероятность мутации, передаваемая функции mutate.\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\n popsize: Размер исходной популяции.\n probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\n\n Returns:\n tuple: Найденное наилучшее совпадние\n\n \"\"\"\n\n def selectindex():\n return int(log(random()) / log(pexp))\n population = [makerandomtree(pc) for i in range(popsize)]\n for i in range(maxgen):\n scores = rankfunction(population)\n print(scores[0][0])\n if scores[0][0] == 0:\n break\n newpop = [scores[0][1], scores[1][1]]\n while len(newpop) < popsize:\n if random() > pnew:\n newpop.append(mutate(crossover(scores[selectindex()][1],\n scores[selectindex()][1], probswap=breedingrate), pc,\n probchange=mutationrate))\n else:\n newpop.append(makerandomtree(pc))\n population = newpop\n scores[0][1].display()\n return scores[0][1]\n\n\ndef gridgame(p):\n max = 3, 3\n lastmove = [-1, -1]\n location = [[randint(0, max[0]), randint(0, max[1])]]\n location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])\n for o in range(50):\n for i in range(2):\n locs = location[i][:] + location[1 - i][:]\n locs.append(lastmove[i])\n move = p[i].evaluate(locs) % 4\n if lastmove[i] == move:\n return 1 - i\n lastmove[i] = move\n if move == 0:\n location[i][0] -= 1\n if location[i][0] < 0:\n location[i][0] = 0\n if move == 1:\n location[i][0] += 1\n if location[i][0] > max[0]:\n location[i][0] = max[0]\n if move == 2:\n location[i][1] -= 1\n if location[i][1] < 0:\n location[i][1] = 0\n if move == 3:\n location[i][1] += 1\n if location[i][1] > max[1]:\n location[i][1] = max[1]\n if location[i] == location[1 - i]:\n return i\n return -1\n\n\ndef tournament(pl):\n losses = [(0) for p in pl]\n for i in range(len(pl)):\n for j in range(len(pl)):\n if i == j:\n continue\n winner = gridgame([pl[i], pl[j]])\n if winner == 0:\n losses[j] += 2\n elif winner == 1:\n losses[i] += 2\n elif winner == -1:\n losses[i] += 1\n losses[i] += 1\n pass\n z = list(zip(losses, pl))\n z.sort(key=lambda t: t[0])\n print(z[0][1].display(indent=4))\n return z\n\n\nclass humanplayer:\n\n def evaluate(self, board):\n me = tuple(board[0:2])\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\n for i in range(4):\n for j in range(4):\n if (i, j) == me:\n print('O', end=' ')\n elif (i, j) in others:\n print('X', end=' ')\n else:\n print('.', end=' ')\n print()\n print('Your last move was %d' % board[len(board) - 1])\n print(' 0')\n print('2 3')\n print(' 1')\n print('Enter move: ')\n move = int(input())\n return move\n\n\nclass fwrapper:\n\n def __init__(self, function, params, name):\n self.function = function\n self.childcount = params\n self.name = name\n\n\n<mask token>\n",
"step-5": "from random import random, randint, choice\r\nfrom copy import deepcopy\r\nfrom math import log\r\n\r\n\"\"\"\r\nОбертка для функций, которые будут находиться в узлах,\r\nпредставляющих функции. Его члены – имя функции, сама функция\r\nи количество принимаемых параметров.\r\n\"\"\"\r\nclass fwrapper:\r\n def __init__(self, function, childcount, name):\r\n self.function = function\r\n self.childcount = childcount\r\n self.name = name\r\n\r\n\"\"\"\r\nКласс функциональных узлов (имеющих потомков). Инициализируется экземпляром класса fwrapper.\r\nМетод evaluate вычисляет значения дочерних узлов и передает их представленной данным узлом\r\nфункции в качестве параметров.\r\n\"\"\"\r\nclass node:\r\n def __init__(self, fw, children):\r\n self.function = fw.function\r\n self.name = fw.name\r\n self.children = children\r\n\r\n def evaluate(self, inp):\r\n results = [n.evaluate(inp) for n in self.children]\r\n return self.function(results)\r\n \r\n # Метод display выводит представление дерева в виде строки\r\n def display(self, indent=0):\r\n print((' ' * indent) + self.name)\r\n for c in self.children:\r\n c.display(indent + 1)\r\n\r\n\"\"\"\r\nКласс узлов, которые просто возвращают один из переданных программе параметров.\r\nЕго метод evaluate возвращает параметр, соответствующий значению idx.\r\n\"\"\"\r\nclass paramnode:\r\n def __init__(self, idx):\r\n self.idx = idx\r\n\r\n def evaluate(self, inp):\r\n return inp[self.idx]\r\n \r\n # Это метод просто печатает индекс возвращаемого параметра\r\n def display(self, indent=0):\r\n print('%sp%d' % (' ' * indent, self.idx))\r\n\r\n\"\"\"\r\nУзлы, возвращающие константы. Метод evaluate просто возвращает\r\nто значение, которым экземпляр был инициализирован.\r\n\"\"\"\r\nclass constnode:\r\n def __init__(self, v):\r\n self.v = v\r\n\r\n def evaluate(self, inp):\r\n return self.v\r\n\r\n def display(self, indent=0):\r\n print('%s%d' % (' ' * indent, self.v))\r\n\r\n \r\n\"\"\"\r\nПростые функции типа add и subtract можно встроить с помощью лямбда-выражений.\r\nДля остальных функцию придется написать в отдельном блоке.\r\nВ любом случае функция обертывается в экземпляр класса fwrapper \r\nвместе со своим именем и числом параметров.\r\n\"\"\"\r\n\r\naddw = fwrapper(lambda l: l[0] + l[1], 2, 'add')\r\nsubw = fwrapper(lambda l: l[0] - l[1], 2, 'subtract')\r\nmulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')\r\n\r\n\r\ndef iffunc(l):\r\n if l[0] > 0:\r\n return l[1]\r\n else:\r\n return l[2]\r\n\r\n\r\nifw = fwrapper(iffunc, 3, 'if')\r\n\r\n\r\ndef isgreater(l):\r\n if l[0] > l[1]:\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\ngtw = fwrapper(isgreater, 2, 'isgreater')\r\n\r\n# В этой строке создается список всех функций, чтобы впоследствии из него\r\n# можно было выбирать элементы случайным образом.\r\nflist = [addw, mulw, ifw, gtw, subw]\r\n\r\n# C помощью класса node можно построить дерево программы (в качестве примера)\r\ndef exampletree():\r\n return node(ifw, [\r\n node(gtw, [paramnode(0), constnode(3)]),\r\n node(addw, [paramnode(1), constnode(5)]),\r\n node(subw, [paramnode(1), constnode(2)]),\r\n ]\r\n )\r\n\r\n\r\n\"\"\"\r\nЭта функция создает узел, содержащий случайно выбранную функцию, и проверяет,\r\nсколько у этой функции должно быть параметров. Для каждого дочернего узла функция\r\nвызывает себя рекурсивно, чтобы создать новый узел. 
Так конструируется все дерево,\r\nпричем процесс построения ветвей завершается в тот момент, когда у очередного узла \r\nнет дочерних (то есть он представляет либо константу, либо переменную-параметр).\r\nПараметр pc равен числу параметров, принимаемых деревом на входе. Параметр fpr\r\nзадает вероятность того, что вновь создаваемый узел будет соответствовать функции,\r\nа ppr – вероятность того, что узел, не являющийся функцией, будет иметь тип paramnode.\r\n\"\"\"\r\ndef makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):\r\n if random() < fpr and maxdepth > 0:\r\n f = choice(flist)\r\n children = [makerandomtree(pc, maxdepth - 1, fpr, ppr)\r\n for i in range(f.childcount)]\r\n return node(f, children)\r\n elif random() < ppr:\r\n return paramnode(randint(0, pc - 1))\r\n else:\r\n return constnode(randint(0, 10))\r\n\r\n\r\ndef hiddenfunction(x, y):\r\n return x ** 2 + 2 * y + 3 * x + 5\r\n\r\n\r\ndef buildhiddenset():\r\n rows = []\r\n for i in range(200):\r\n x = randint(0, 40)\r\n y = randint(0, 40)\r\n rows.append([x, y, hiddenfunction(x, y)])\r\n return rows\r\n\r\n\r\n\"\"\"\r\nЭта функция перебирает все строки набора данных, вычисляет функцию от указанных \r\nв ней аргументов и сравнивает с результатом. Абсолютные значения разностей суммируются.\r\nЧем меньше сумма, тем лучше программа, а значение 0 говорит о том, что все результаты \r\nв точности совпали. \r\n\"\"\"\r\ndef scorefunction(tree, s):\r\n dif = 0\r\n for data in s:\r\n v = tree.evaluate([data[0], data[1]])\r\n dif += abs(v - data[2])\r\n return dif\r\n\r\n\r\n\"\"\"\r\nЭта функция начинает с корня дерева и решает, следует ли изменить\r\nузел. Если нет, она рекурсивно вызывает mutate для дочерних узлов.\r\nМожет случиться, что мутации подвергнутся все узлы, а иногда дерево\r\nвообще не изменится.\r\n\"\"\"\r\n# Мутация путем замены поддерева\r\ndef mutate(t, pc, probchange=0.1):\r\n if random() < probchange:\r\n return makerandomtree(pc)\r\n else:\r\n result = deepcopy(t)\r\n if hasattr(t, \"children\"):\r\n result.children = [mutate(c, pc, probchange) for c in t.children]\r\n return result\r\n\r\n\"\"\"\r\nФункции, выполняющей скрещивание, передаются два дерева, и она\r\nобходит оба. Если случайно выбранное число не превышает пороговой\r\nвероятности, то функция возвращает копию первого дерева, в которой\r\nодна из ветвей заменена какой-то ветвью, взятой из второго дерева.\r\nПоскольку обход выполняется параллельно, то скрещивание произойдет примерно на одном уровне каждого дерева.\r\n\"\"\"\r\n# Функция скрещивания. 
Две успешные программы комбинируются с целью получения новой программы.\r\ndef crossover(t1, t2, probswap=0.7, top=1):\r\n if random() < probswap and not top:\r\n return deepcopy(t2)\r\n else:\r\n result = deepcopy(t1)\r\n if hasattr(t1, 'children') and hasattr(t2, 'children'):\r\n result.children = [crossover(c, choice(t2.children), probswap, 0)\r\n for c in t1.children]\r\n return result\r\n\r\n# Функция возвращает функцию ранжирования для имеющегося набора данных\r\ndef getrankfunction(dataset):\r\n def rankfunction(population):\r\n scores = [(scorefunction(t, dataset), t) for t in population]\r\n scores.sort()\r\n return scores\r\n\r\n return rankfunction\r\n\r\n\r\n\"\"\"\r\nСоздание конкурентной среды, в которой программы будут эволюционировать.\r\nСмысл в том, чтобы создать набор случайных программ, отобрать из них\r\nнаилучшие для копирования и модификации и повторять процесс, пока не будет\r\nвыполнено некое условие останова.\r\n\"\"\"\r\ndef evolve(pc, popsize, rankfunction, maxgen=500, mutationrate=0.1, breedingrate=0.4, pexp=0.7, pnew=0.05):\r\n \"\"\"Эта функция создает случайную исходную популяцию, а затем выполняет не более maxgen итераций цикла,\r\n вызывая каждый раз функцию rankfunction для ранжирования программ от наилучшей до наихудшей.\r\n Наилучшая программа автоматически попадает в следующее поколение без изменения. \r\n Args:\r\n rankfunction: Функция, применяемая для ранжирования списка программ от наилучшей к наихудшей.\r\n mutationrate: Вероятность мутации, передаваемая функции mutate.\r\n breedingrate: Вероятность скрещивания, передаваемая функции crossover.\r\n popsize: Размер исходной популяции.\r\n probexp: Скорость убывания вероятности выбора программ с низким рангом. Чем выше значение, тем более суров процесс естественного отбора/\r\n probnew: Вероятность включения в новую популяцию совершенно новой случайно сгенерированной программы.\r\n\r\n Returns:\r\n tuple: Найденное наилучшее совпадние\r\n\r\n \"\"\"\r\n # Возвращает случайное число, отдавая предпочтение более маленьким числам.\r\n # Чем меньше значение pexp, тем больше будет доля маленьких чисел.\r\n def selectindex():\r\n return int(log(random()) / log(pexp))\r\n\r\n # Создаем случайную исходную популяцию\r\n population = [makerandomtree(pc) for i in range(popsize)]\r\n for i in range(maxgen):\r\n scores = rankfunction(population)\r\n print(scores[0][0])\r\n if scores[0][0] == 0: break\r\n\r\n # Две наилучшие особи отбираются всегда\r\n newpop = [scores[0][1], scores[1][1]]\r\n\r\n # Строим следующее поколение\r\n while len(newpop) < popsize:\r\n if random() > pnew:\r\n newpop.append(mutate(\r\n crossover(scores[selectindex()][1],\r\n scores[selectindex()][1],\r\n probswap=breedingrate),\r\n pc, probchange=mutationrate))\r\n else:\r\n # Добавляем случайный узел для внесения неопределенности\r\n newpop.append(makerandomtree(pc))\r\n\r\n population = newpop\r\n scores[0][1].display()\r\n return scores[0][1]\r\n\r\n#[\r\n# (10, \"program1\"),\r\n# (17, \"program2\"),\r\n#]\r\n\r\ndef gridgame(p):\r\n # Размер доски\r\n max = (3, 3)\r\n\r\n # Запоминаем последний ход каждого игрока\r\n lastmove = [-1, -1]\r\n\r\n # Запоминаем положения игроков\r\n location = [[randint(0, max[0]), randint(0, max[1])]]\r\n\r\n # Располагаем второго игрока на достаточном удалении от первого\r\n location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])\r\n # Не более 50 ходов до объявления ничьей\r\n for o in range(50):\r\n\r\n # Для каждого игрока\r\n for i in range(2):\r\n locs = location[i][:] + location[1 - 
i][:]\r\n locs.append(lastmove[i])\r\n move = p[i].evaluate(locs) % 4\r\n\r\n # Если игрок два раза подряд ходит в одном направлении, ему\r\n # засчитывается проигрыш\r\n if lastmove[i] == move: return 1 - i\r\n lastmove[i] = move\r\n if move == 0:\r\n location[i][0] -= 1\r\n # Доска ограничена\r\n if location[i][0] < 0: location[i][0] = 0\r\n if move == 1:\r\n location[i][0] += 1\r\n if location[i][0] > max[0]: location[i][0] = max[0]\r\n if move == 2:\r\n location[i][1] -= 1\r\n if location[i][1] < 0: location[i][1] = 0\r\n if move == 3:\r\n location[i][1] += 1\r\n if location[i][1] > max[1]: location[i][1] = max[1]\r\n\r\n # Если противник захвачен в плен, вы выиграли\r\n if location[i] == location[1 - i]: return i\r\n return -1\r\n\r\n\r\ndef tournament(pl):\r\n # Массив для подсчета проигрышей\r\n losses = [0 for p in pl]\r\n\r\n # Каждый игрок встречается со всеми другими\r\n for i in range(len(pl)):\r\n for j in range(len(pl)):\r\n if i == j: continue\r\n\r\n # Кто выиграл?\r\n winner = gridgame([pl[i], pl[j]])\r\n\r\n # Два очка за поражение, одно за ничью\r\n if winner == 0:\r\n losses[j] += 2\r\n elif winner == 1:\r\n losses[i] += 2\r\n elif winner == -1:\r\n losses[i] += 1\r\n losses[i] += 1\r\n pass\r\n\r\n # Отсортировать и вернуть результаты\r\n z = list(zip(losses, pl))\r\n z.sort(key=lambda t: t[0])\r\n # input()\r\n print(z[0][1].display(indent=4))\r\n return z\r\n\r\nclass humanplayer:\r\n def evaluate(self, board):\r\n\r\n # Получить мою позицию и позиции других игроков\r\n me = tuple(board[0:2])\r\n others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]\r\n\r\n # Нарисовать доску\r\n for i in range(4):\r\n for j in range(4):\r\n if (i, j) == me:\r\n print('O',end=' ')\r\n elif (i, j) in others:\r\n print('X',end=' ')\r\n else:\r\n print('.',end=' ')\r\n print()\r\n\r\n # Показать ходы, для справки\r\n print('Your last move was %d' % board[len(board) - 1])\r\n print(' 0')\r\n print('2 3')\r\n print(' 1')\r\n print('Enter move: ')\r\n\r\n # Вернуть введенное пользователем число\r\n move = int(input())\r\n return move\r\n\r\n\r\nclass fwrapper:\r\n def __init__(self, function, params, name):\r\n self.function = function\r\n self.childcount = params\r\n self.name = name\r\n\r\n\r\n# flist={'str':[substringw,concatw],'int':[indexw]}\r\nflist = [addw, mulw, ifw, gtw, subw]\r\n",
"step-ids": [
23,
25,
28,
31,
34
]
}
|
[
23,
25,
28,
31,
34
] |
"""
This is a post login API and hence would have APIDetails and SessionDetails in the request object
-------------------------------------------------------------------------------------------------
Step 1: find whether the user's IP address is provided in the request object; if yes then go to step 2, else go to step 4
Step 2: call third party api to find the country of the IP address and its ISO2 and ISO3 codes
Step 3: using the ISO2 and/or ISO3 codes get the user's geo and associated currency. Return output
Step 4: from UserProfiles table get city_id and using this get the user's geo and associated currency. Return output
"""
"""
INPUT:
{
"APIDetails":{
"token_type":1,
"token_vendor_id":1,
"token_string":"sdxfcgvbhjnmklasdfghjk",
"dev_key":"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234"
},
"SessionDetails":{
"profile_id":159,
"session_id":787,
"session_key":"xxbJt0nUwyMbsDdOfVFYISRjoD1DC0jO"
},
"APIParams":{
"user_ip" : "192.168.0.1"
}
}
"""
"""
OUTPUT:
{
"AuthenticationDetails": {
"Status": "Success",
"Message": "ApiDetails fine to process"
},
"SessionDetails": {
"Status": "Success",
"Message": "session is active. session details updated",
"Payload": {
"profile_id": 159,
"session_id": 787,
"session_key": "LcTyf2Ypx6YRQOz3AYOyaE2uedblWnZB"
}
},
"Payload": {
"Status": "Success",
"Message": "ticket types and respective questions Fetched successfully",
"Payload": {
"geo_id": 2,
"geo_name": "Indian Subcontinent",
"geo_currency": "INR"
}
}
}
"""
|
normal
|
{
"blob_id": "d7daf9b26f0b9f66b15b8533df032d17719e548b",
"index": 3343,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\nThis is a post login API and hence would have APIDetails and SessionDetails in the request object\n-------------------------------------------------------------------------------------------------\nStep 1: find if user's ip address is provided in the request object, if yes then got to step 2 else goto step 4\nStep 2: call third party api to find the country of the IP address and its ISO2 and ISO3 codes\nStep 3: using the ISO2 and/or ISO3 codes get the user's geo and associated currency. Return output\nStep 4: from UserProfiles table get city_id and using this get the user's geo and associated currency. Return output\n\"\"\"\n\n\"\"\"\nINPUT:\n{\n \"APIDetails\":{\n \t\"token_type\":1,\n \t\"token_vendor_id\":1,\n \t\"token_string\":\"sdxfcgvbhjnmklasdfghjk\",\n \t\"dev_key\":\"sjdkljagagerukjdgjncjdsnjkfhkjasdghreuiuie@#$%$dgd#$@d234\"\n },\n \"SessionDetails\":{\n \"profile_id\":159,\n \"session_id\":787,\n \"session_key\":\"xxbJt0nUwyMbsDdOfVFYISRjoD1DC0jO\"\n },\n \"APIParams\":{\n \"user_ip\" : \"192.168.0.1\"\n }\n}\n\"\"\"\n\n\"\"\"\nOUTPUT:\n{\n \"AuthenticationDetails\": {\n \"Status\": \"Success\",\n \"Message\": \"ApiDetails fine to process\"\n },\n \"SessionDetails\": {\n \"Status\": \"Success\",\n \"Message\": \"session is active. session details updated\",\n \"Payload\": {\n \"profile_id\": 159,\n \"session_id\": 787,\n \"session_key\": \"LcTyf2Ypx6YRQOz3AYOyaE2uedblWnZB\"\n }\n },\n \"Payload\": {\n \"Status\": \"Success\",\n \"Message\": \"ticket types and respective questions Fetched successfully\",\n \"Payload\": {\n \"geo_id\": 2,\n \"geo_name\": \"Indian Subcontinent\",\n \"geo_currency\": \"INR\"\n }\n }\n}\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class ImageData:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, image):
self.image = image
self._contour_interval_dist = None
self._feet_per_pixel = None
<|reserved_special_token_0|>
def __get_countour_interval_dist(self):
candidates = []
for word, offset in self.__words_offsets:
candidates += self.__find_candidates_for_id_and_index(self.
word_list, word, offset)
return candidates[0][1] if len(candidates) > 0 else 40
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def feet_per_pixel(self):
if self._feet_per_pixel is None:
self._feet_per_pixel = self.__get_feet_per_pixel()
return self._feet_per_pixel
<|reserved_special_token_0|>
class TopographicMap:
def __init__(self, filename):
self.filename = filename
self.image = cv2.imread(filename, 1)[500:-550, 500:-500]
self.image_data = ImageData(self.image)
self.height, self.width, self.channels = self.image.shape
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ImageData:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, image):
self.image = image
self._contour_interval_dist = None
self._feet_per_pixel = None
def __get_sub_image(self):
rows, cols, chan = self.image.shape
sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(
self.__left_thresh * cols):int(self.__right_thresh * cols)]
sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy
=self.__resize_factor, interpolation=cv2.INTER_LINEAR)
sub_image = Helper.convert_image_to_mask(sub_image)
gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5,
7, 21)
threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.
THRESH_BINARY_INV)[1]
return sub_image
def __get_countour_interval_dist(self):
candidates = []
for word, offset in self.__words_offsets:
candidates += self.__find_candidates_for_id_and_index(self.
word_list, word, offset)
return candidates[0][1] if len(candidates) > 0 else 40
<|reserved_special_token_0|>
def __find_candidates_for_id_and_index(self, word_list, id_word, offset):
candidates = []
indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]
for i in indices:
if word_list[i + offset].isnumeric():
cand = i, int(word_list[i + offset])
candidates.append(cand)
return candidates
<|reserved_special_token_0|>
@property
def contour_interval_dist(self):
return 40
@contour_interval_dist.setter
def contour_interval_dist(self, value):
self._contour_interval_dist = value
@property
def feet_per_pixel(self):
if self._feet_per_pixel is None:
self._feet_per_pixel = self.__get_feet_per_pixel()
return self._feet_per_pixel
<|reserved_special_token_0|>
class TopographicMap:
def __init__(self, filename):
self.filename = filename
self.image = cv2.imread(filename, 1)[500:-550, 500:-500]
self.image_data = ImageData(self.image)
self.height, self.width, self.channels = self.image.shape
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ImageData:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, image):
self.image = image
self._contour_interval_dist = None
self._feet_per_pixel = None
def __get_sub_image(self):
rows, cols, chan = self.image.shape
sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(
self.__left_thresh * cols):int(self.__right_thresh * cols)]
sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy
=self.__resize_factor, interpolation=cv2.INTER_LINEAR)
sub_image = Helper.convert_image_to_mask(sub_image)
gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5,
7, 21)
threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.
THRESH_BINARY_INV)[1]
return sub_image
def __get_countour_interval_dist(self):
candidates = []
for word, offset in self.__words_offsets:
candidates += self.__find_candidates_for_id_and_index(self.
word_list, word, offset)
return candidates[0][1] if len(candidates) > 0 else 40
<|reserved_special_token_0|>
def __find_candidates_for_id_and_index(self, word_list, id_word, offset):
candidates = []
indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]
for i in indices:
if word_list[i + offset].isnumeric():
cand = i, int(word_list[i + offset])
candidates.append(cand)
return candidates
<|reserved_special_token_0|>
@property
def contour_interval_dist(self):
return 40
@contour_interval_dist.setter
def contour_interval_dist(self, value):
self._contour_interval_dist = value
@property
def feet_per_pixel(self):
if self._feet_per_pixel is None:
self._feet_per_pixel = self.__get_feet_per_pixel()
return self._feet_per_pixel
@feet_per_pixel.setter
def feet_per_pixel(self, value):
self._feet_per_pixel = value
class TopographicMap:
def __init__(self, filename):
self.filename = filename
self.image = cv2.imread(filename, 1)[500:-550, 500:-500]
self.image_data = ImageData(self.image)
self.height, self.width, self.channels = self.image.shape
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ImageData:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, image):
self.image = image
self._contour_interval_dist = None
self._feet_per_pixel = None
def __get_sub_image(self):
rows, cols, chan = self.image.shape
sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(
self.__left_thresh * cols):int(self.__right_thresh * cols)]
sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy
=self.__resize_factor, interpolation=cv2.INTER_LINEAR)
sub_image = Helper.convert_image_to_mask(sub_image)
gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5,
7, 21)
threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.
THRESH_BINARY_INV)[1]
return sub_image
def __get_countour_interval_dist(self):
candidates = []
for word, offset in self.__words_offsets:
candidates += self.__find_candidates_for_id_and_index(self.
word_list, word, offset)
return candidates[0][1] if len(candidates) > 0 else 40
<|reserved_special_token_0|>
def __find_candidates_for_id_and_index(self, word_list, id_word, offset):
candidates = []
indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]
for i in indices:
if word_list[i + offset].isnumeric():
cand = i, int(word_list[i + offset])
candidates.append(cand)
return candidates
def __get_words(self):
filename = '{}.png'.format(os.getpid())
cv2.imwrite(filename, self.sub_image)
words = pytesseract.image_to_string(Image.open(filename))
boxes = pytesseract.image_to_string(Image.open(filename), boxes=
True, config='hocr')
os.remove(filename)
word_list = words.split()
box_list = boxes.split()
return word_list, box_list
@property
def contour_interval_dist(self):
return 40
@contour_interval_dist.setter
def contour_interval_dist(self, value):
self._contour_interval_dist = value
@property
def feet_per_pixel(self):
if self._feet_per_pixel is None:
self._feet_per_pixel = self.__get_feet_per_pixel()
return self._feet_per_pixel
@feet_per_pixel.setter
def feet_per_pixel(self, value):
self._feet_per_pixel = value
class TopographicMap:
def __init__(self, filename):
self.filename = filename
self.image = cv2.imread(filename, 1)[500:-550, 500:-500]
self.image_data = ImageData(self.image)
self.height, self.width, self.channels = self.image.shape
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import cv2
import pytesseract
import os
from PIL import Image
import numpy as np
from helper_functions import Helper
class ImageData:
# multipliers to get portion of image with interval value
__bottom_thresh = 0.9
__left_thresh = 0.35
__right_thresh = 0.65
# (words, offset) to contour interval value
__words_offsets = [("CONTOUR", 2), ("INTERVAL", 1), ("FEET", -1)]
__resize_factor = 6
def __init__(self, image):
self.image = image
# self.sub_image = self.__get_sub_image()
# word_list, box_list = self.__get_words()
# self.word_list = word_list
# self.box_list = box_list
self._contour_interval_dist = None
self._feet_per_pixel = None
def __get_sub_image(self):
rows, cols, chan = self.image.shape
sub_image = self.image[
int(self.__bottom_thresh*rows):rows, # bottom rows
int(self.__left_thresh*cols):int(self.__right_thresh*cols) # middle rows
]
sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy=self.__resize_factor,
interpolation = cv2.INTER_LINEAR)
sub_image = Helper.convert_image_to_mask(sub_image)
gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, 7, 21)
threshold_image = cv2.threshold(gray_denoised_image,225,255,cv2.THRESH_BINARY_INV)[1]
return sub_image
def __get_countour_interval_dist(self):
candidates = []
for word, offset in self.__words_offsets:
candidates += self.__find_candidates_for_id_and_index(self.word_list, word, offset)
return candidates[0][1] if len(candidates) > 0 else 40
def __get_feet_per_pixel(self):
# row_size = 6
# total = int(len(self.box_list) / 6)
# idx = 0
# nums = [(idx, int(char)) for idx, char in enumerate(self.box_list)
# if idx % row_size == 0 and char.isdigit() and int(char) > 2 and int(char) < 10]
# nums.sort(key=lambda val: self.box_list[val[0] + 2])
# threshold = 3
# prev_x = -1
# prev_y = -2 * threshold
# prev_num = -1
# img = self.sub_image.copy()
# lsd = cv2.createLineSegmentDetector(0)
# lines = lsd.detect(img)[0]
# drawn_img = lsd.drawSegments(img,lines)
# cv2.imshow("LSD",drawn_img )
# # h, w, _ = img.shape
# # for (idx, num) in nums:
# # cur_x = int(self.box_list[idx + 1])
# # cur_y = int(self.box_list[idx + 2])
# # cur_x2 = int(self.box_list[idx + 3])
# # cur_y2 = int(self.box_list[idx + 4])
# # print(str(num) + ": " + str(cur_x) + ", " + str(cur_y) + " :: " + str(cur_x2) + ", " + str(cur_y2))
# # img = cv2.rectangle(img,(cur_x,h-cur_y),(cur_x2,h-cur_y2),(255,0,0),2)
# # # if abs(cur_y - prev_y) < threshold:
# # # dist = abs(cur_x - cur_y)
# # # diff = abs(num - prev_num)
# # # print("possibility found ^\n--------")
# # # prev_x = cur_x
# # # prev_y = cur_y
# # # prev_num = num
# img = cv2.resize(img, None, fx=1/6, fy=1/6,
# interpolation = cv2.INTER_LINEAR)
# cv2.imshow("blah", img)
# print(nums)
		return 5280 / 790  # hardcoded estimate: ft per mile / pixels per mile = feet per pixel
def __find_candidates_for_id_and_index(self, word_list, id_word, offset):
candidates = []
indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]
for i in indices:
if word_list[i+offset].isnumeric():
cand = (i, int(word_list[i+offset]))
candidates.append(cand)
return candidates
def __get_words(self):
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, self.sub_image)
words = pytesseract.image_to_string(Image.open(filename))
boxes = pytesseract.image_to_string(Image.open(filename), boxes=True, config="hocr")
os.remove(filename)
word_list = words.split()
box_list = boxes.split()
return word_list, box_list
@property
def contour_interval_dist(self):
# if self._contour_interval_dist is None:
# self._contour_interval_dist = self.__get_countour_interval_dist()
# return self._contour_interval_dist
# return 40
return 40
@contour_interval_dist.setter
def contour_interval_dist(self, value):
self._contour_interval_dist = value
@property
def feet_per_pixel(self):
if self._feet_per_pixel is None:
self._feet_per_pixel = self.__get_feet_per_pixel()
return self._feet_per_pixel
@feet_per_pixel.setter
def feet_per_pixel(self, value):
self._feet_per_pixel = value
class TopographicMap:
def __init__(self, filename):
self.filename = filename
self.image = cv2.imread(filename, 1)[500:-550, 500:-500]
# self.image = cv2.imread(filename, 1)#[500:-550, 500:-500]
self.image_data = ImageData(self.image)
self.height, self.width, self.channels = self.image.shape
if __name__ == '__main__':
# img = Topographic_Map("SanLuisObispo.jpg")
import numpy as np
import time
image = cv2.imread('maps/SanLuisObispo.jpg', 1)[500:1000, 500:1300]
r, c, chan = image.shape
tl = image[:int(r/2), :int(c/2)]
tr = image[:int(r/2), int(c/2):]
bl = image[int(r/2):, :int(c/2)]
br = image[int(r/2):, int(c/2):]
s = time.time()
img = cv2.fastNlMeansDenoising(image, None, 5, 7, 21)
e = time.time()
print("total image: " + str(e-s))
s = time.time()
tl = cv2.fastNlMeansDenoising(tl, None, 5, 7, 21)
tr = cv2.fastNlMeansDenoising(tr, None, 5, 7, 21)
bl = cv2.fastNlMeansDenoising(bl, None, 5, 7, 21)
br = cv2.fastNlMeansDenoising(br, None, 5, 7, 21)
e = time.time()
top = np.concatenate((tl, tr), axis=1)
bottom = np.concatenate((bl, br), axis=1)
new_image = np.concatenate((top, bottom), axis=0)
print("partitioned image: " + str(e-s))
cv2.imshow('img', img)
cv2.imshow('new_image', new_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
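A minimal usage sketch for the two classes above (the map path is illustrative; the two properties return the hardcoded 40 ft interval and the 5280/790 feet-per-pixel estimate noted in the code):
topo = TopographicMap('maps/SanLuisObispo.jpg')   # example path, not required to exist
print(topo.width, topo.height, topo.channels)
print(topo.image_data.contour_interval_dist)      # -> 40 (hardcoded)
print(topo.image_data.feet_per_pixel)             # -> 5280 / 790, about 6.68 ft per pixel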
|
flexible
|
{
"blob_id": "d3be26d56b3597a5d9e3a870b735a30d90d1e501",
"index": 8165,
"step-1": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n <mask token>\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n <mask token>\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n\n def __get_sub_image(self):\n rows, cols, chan = self.image.shape\n sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(\n self.__left_thresh * cols):int(self.__right_thresh * cols)]\n sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy\n =self.__resize_factor, interpolation=cv2.INTER_LINEAR)\n sub_image = Helper.convert_image_to_mask(sub_image)\n gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, \n 7, 21)\n threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.\n THRESH_BINARY_INV)[1]\n return sub_image\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n\n def __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n candidates = []\n indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n for i in indices:\n if word_list[i + offset].isnumeric():\n cand = i, int(word_list[i + offset])\n candidates.append(cand)\n return candidates\n <mask token>\n\n @property\n def contour_interval_dist(self):\n return 40\n\n @contour_interval_dist.setter\n def contour_interval_dist(self, value):\n self._contour_interval_dist = value\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n <mask token>\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n\n def __get_sub_image(self):\n rows, cols, chan = self.image.shape\n sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(\n self.__left_thresh * cols):int(self.__right_thresh * cols)]\n sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy\n =self.__resize_factor, interpolation=cv2.INTER_LINEAR)\n sub_image = Helper.convert_image_to_mask(sub_image)\n gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, \n 7, 21)\n threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.\n THRESH_BINARY_INV)[1]\n return sub_image\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n\n def __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n candidates = []\n indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n for i in indices:\n if word_list[i + offset].isnumeric():\n cand = i, int(word_list[i + offset])\n candidates.append(cand)\n return candidates\n <mask token>\n\n @property\n def contour_interval_dist(self):\n return 40\n\n @contour_interval_dist.setter\n def contour_interval_dist(self, value):\n self._contour_interval_dist = value\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n\n @feet_per_pixel.setter\n def feet_per_pixel(self, value):\n self._feet_per_pixel = value\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ImageData:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, image):\n self.image = image\n self._contour_interval_dist = None\n self._feet_per_pixel = None\n\n def __get_sub_image(self):\n rows, cols, chan = self.image.shape\n sub_image = self.image[int(self.__bottom_thresh * rows):rows, int(\n self.__left_thresh * cols):int(self.__right_thresh * cols)]\n sub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy\n =self.__resize_factor, interpolation=cv2.INTER_LINEAR)\n sub_image = Helper.convert_image_to_mask(sub_image)\n gray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, \n 7, 21)\n threshold_image = cv2.threshold(gray_denoised_image, 225, 255, cv2.\n THRESH_BINARY_INV)[1]\n return sub_image\n\n def __get_countour_interval_dist(self):\n candidates = []\n for word, offset in self.__words_offsets:\n candidates += self.__find_candidates_for_id_and_index(self.\n word_list, word, offset)\n return candidates[0][1] if len(candidates) > 0 else 40\n <mask token>\n\n def __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n candidates = []\n indices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n for i in indices:\n if word_list[i + offset].isnumeric():\n cand = i, int(word_list[i + offset])\n candidates.append(cand)\n return candidates\n\n def __get_words(self):\n filename = '{}.png'.format(os.getpid())\n cv2.imwrite(filename, self.sub_image)\n words = pytesseract.image_to_string(Image.open(filename))\n boxes = pytesseract.image_to_string(Image.open(filename), boxes=\n True, config='hocr')\n os.remove(filename)\n word_list = words.split()\n box_list = boxes.split()\n return word_list, box_list\n\n @property\n def contour_interval_dist(self):\n return 40\n\n @contour_interval_dist.setter\n def contour_interval_dist(self, value):\n self._contour_interval_dist = value\n\n @property\n def feet_per_pixel(self):\n if self._feet_per_pixel is None:\n self._feet_per_pixel = self.__get_feet_per_pixel()\n return self._feet_per_pixel\n\n @feet_per_pixel.setter\n def feet_per_pixel(self, value):\n self._feet_per_pixel = value\n\n\nclass TopographicMap:\n\n def __init__(self, filename):\n self.filename = filename\n self.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n self.image_data = ImageData(self.image)\n self.height, self.width, self.channels = self.image.shape\n\n\n<mask token>\n",
"step-5": "import cv2\nimport pytesseract\nimport os\nfrom PIL import Image\nimport numpy as np\n\nfrom helper_functions import Helper\n\nclass ImageData:\n\t# multipliers to get portion of image with interval value\n\t__bottom_thresh = 0.9\n\t__left_thresh = 0.35\n\t__right_thresh = 0.65\n\n\t# (words, offset) to contour interval value\n\t__words_offsets = [(\"CONTOUR\", 2), (\"INTERVAL\", 1), (\"FEET\", -1)]\n\t__resize_factor = 6\n\n\tdef __init__(self, image):\n\t\tself.image = image\n\n\t\t# self.sub_image = self.__get_sub_image()\n\t\t\n\t\t# word_list, box_list = self.__get_words()\n\t\t# self.word_list = word_list\n\t\t# self.box_list = box_list\n\n\t\tself._contour_interval_dist = None\n\t\tself._feet_per_pixel = None\n\n\tdef __get_sub_image(self):\n\t\trows, cols, chan = self.image.shape\n\n\t\tsub_image = self.image[\n\t\t\tint(self.__bottom_thresh*rows):rows, \t\t\t\t\t\t# bottom rows\n\t\t\tint(self.__left_thresh*cols):int(self.__right_thresh*cols)\t# middle rows\n\t\t\t]\n\n\t\tsub_image = cv2.resize(sub_image, None, fx=self.__resize_factor, fy=self.__resize_factor, \n\t\t\tinterpolation = cv2.INTER_LINEAR)\n\n\t\tsub_image = Helper.convert_image_to_mask(sub_image)\n\t\tgray_denoised_image = cv2.fastNlMeansDenoising(sub_image, None, 5, 7, 21)\n\t\tthreshold_image = cv2.threshold(gray_denoised_image,225,255,cv2.THRESH_BINARY_INV)[1]\n\n\t\treturn sub_image\n\n\tdef __get_countour_interval_dist(self):\n\t\tcandidates = []\n\n\t\tfor word, offset in self.__words_offsets:\n\t\t\tcandidates += self.__find_candidates_for_id_and_index(self.word_list, word, offset)\n\n\t\treturn candidates[0][1] if len(candidates) > 0 else 40 \n\n\tdef __get_feet_per_pixel(self):\n\t\t# row_size = 6\n\t\t# total = int(len(self.box_list) / 6)\n\t\t# idx = 0\n\n\t\t# nums = [(idx, int(char)) for idx, char in enumerate(self.box_list) \n\t\t# if idx % row_size == 0 and char.isdigit() and int(char) > 2 and int(char) < 10]\n\n\t\t# nums.sort(key=lambda val: self.box_list[val[0] + 2])\n\n\t\t# threshold = 3\n\t\t# prev_x = -1\n\t\t# prev_y = -2 * threshold\n\t\t# prev_num = -1\n\n\t\t# img = self.sub_image.copy()\n\n\t\t# lsd = cv2.createLineSegmentDetector(0)\n\t\t# lines = lsd.detect(img)[0] \n\t\t# drawn_img = lsd.drawSegments(img,lines)\n\t\t# cv2.imshow(\"LSD\",drawn_img )\n\t\t\n\t\t# # h, w, _ = img.shape\n\n\t\t# # for (idx, num) in nums:\n\t\t# # \tcur_x = int(self.box_list[idx + 1])\n\t\t# # \tcur_y = int(self.box_list[idx + 2])\n\t\t# # \tcur_x2 = int(self.box_list[idx + 3])\n\t\t# # \tcur_y2 = int(self.box_list[idx + 4])\n\n\t\t# # \tprint(str(num) + \": \" + str(cur_x) + \", \" + str(cur_y) + \" :: \" + str(cur_x2) + \", \" + str(cur_y2))\n\t\t# # \timg = cv2.rectangle(img,(cur_x,h-cur_y),(cur_x2,h-cur_y2),(255,0,0),2)\n\t\t# # \t# if abs(cur_y - prev_y) < threshold:\n\t\t# # \t# \tdist = abs(cur_x - cur_y)\n\t\t# # \t# \tdiff = abs(num - prev_num)\n\t\t# # \t# \tprint(\"possibility found ^\\n--------\")\n\n\t\t# # \t# prev_x = cur_x\n\t\t# # \t# prev_y = cur_y\n\t\t# # \t# prev_num = num\n\t\t# img = cv2.resize(img, None, fx=1/6, fy=1/6, \n\t\t# \tinterpolation = cv2.INTER_LINEAR)\n\t\t# cv2.imshow(\"blah\", img)\n\t\t# print(nums)\n\n\t\treturn 5280 / 790# hardcoded estimatem, ft per mile / pixel per mile = feet per pixel\n\n\tdef __find_candidates_for_id_and_index(self, word_list, id_word, offset):\n\t\tcandidates = []\n\n\t\tindices = [i for i, x in enumerate(word_list) if x.upper() == id_word]\n\n\t\tfor i in indices:\n\t\t\tif word_list[i+offset].isnumeric():\n\t\t\t\tcand = (i, 
int(word_list[i+offset]))\n\t\t\t\tcandidates.append(cand)\n\n\t\treturn candidates\n\n\tdef __get_words(self):\n\t\tfilename = \"{}.png\".format(os.getpid())\n\t\tcv2.imwrite(filename, self.sub_image)\n\n\t\twords = pytesseract.image_to_string(Image.open(filename))\n\n\t\tboxes = pytesseract.image_to_string(Image.open(filename), boxes=True, config=\"hocr\")\n\n\t\tos.remove(filename)\n\t\tword_list = words.split()\n\t\tbox_list = boxes.split()\n\n\t\treturn word_list, box_list\n\n\t@property\n\tdef contour_interval_dist(self):\n\t\t# if self._contour_interval_dist is None:\n\t\t# \tself._contour_interval_dist = self.__get_countour_interval_dist()\n\n\t\t# return self._contour_interval_dist\n\t\t# return 40\n\t\treturn 40\n\n\t@contour_interval_dist.setter\n\tdef contour_interval_dist(self, value):\n\t\tself._contour_interval_dist = value\n\n\t@property\n\tdef feet_per_pixel(self):\n\t\tif self._feet_per_pixel is None:\n\t\t\tself._feet_per_pixel = self.__get_feet_per_pixel()\n\n\t\treturn self._feet_per_pixel\n\n\t@feet_per_pixel.setter\n\tdef feet_per_pixel(self, value):\n\t\tself._feet_per_pixel = value\n\nclass TopographicMap:\n\tdef __init__(self, filename):\n\t\tself.filename = filename\n\t\tself.image = cv2.imread(filename, 1)[500:-550, 500:-500]\n\t\t# self.image = cv2.imread(filename, 1)#[500:-550, 500:-500]\n\t\tself.image_data = ImageData(self.image)\n\n\t\tself.height, self.width, self.channels = self.image.shape\n\t\t\n\nif __name__ == '__main__':\n\t# img = Topographic_Map(\"SanLuisObispo.jpg\")\n\timport numpy as np\n\timport time\n\timage = cv2.imread('maps/SanLuisObispo.jpg', 1)[500:1000, 500:1300]\n\tr, c, chan = image.shape\n\ttl = image[:int(r/2), :int(c/2)]\n\ttr = image[:int(r/2), int(c/2):]\n\tbl = image[int(r/2):, :int(c/2)]\n\tbr = image[int(r/2):, int(c/2):]\n\t\n\ts = time.time()\n\timg = cv2.fastNlMeansDenoising(image, None, 5, 7, 21)\n\te = time.time()\n\n\tprint(\"total image: \" + str(e-s))\n\n\ts = time.time()\n\ttl = cv2.fastNlMeansDenoising(tl, None, 5, 7, 21)\n\ttr = cv2.fastNlMeansDenoising(tr, None, 5, 7, 21)\n\tbl = cv2.fastNlMeansDenoising(bl, None, 5, 7, 21)\n\tbr = cv2.fastNlMeansDenoising(br, None, 5, 7, 21)\n\te = time.time()\n\n\ttop = np.concatenate((tl, tr), axis=1)\n\tbottom = np.concatenate((bl, br), axis=1)\n\tnew_image = np.concatenate((top, bottom), axis=0)\n\n\tprint(\"partitioned image: \" + str(e-s))\n\n\tcv2.imshow('img', img)\n\tcv2.imshow('new_image', new_image)\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n",
"step-ids": [
6,
10,
11,
12,
17
]
}
|
[
6,
10,
11,
12,
17
] |
import ast
import datetime
import json
from base64 import b64encode
import requests
IMGUR_BASE = "https://api.imgur.com"
class Task:
"""
A class used to represent a job
...
Attributes
----------
queue : list
the list of all urls
    pending : list
        the list of all pending urls
    complete : list
        the list of all completed urls
    failed : list
        the list of all failed urls
url_map : dict
a dictionary that maps provided urls with imgur urls
created:
date created
finished:
date finished
status:
the job status
credentials:
the access token and other useful objects
"""
def __init__(self):
"""
Create the object
:rtype: object
"""
self.queue = list()
self.pending = []
self.complete = []
self.failed = []
self.url_map = {}
self.created = datetime.datetime.now().isoformat()
self.finished = None
self.status = "pending"
self.credentials = None
def initialize(self, urls, cred):
"""
Initialize the object with parameters urls and cred
:param urls : list > the list of urls
:param cred : dict > the client credentials
:rtype: object
"""
for i in urls:
self.enqueue(i)
self.pending.append(i)
clean = str(cred).replace('b\"', '').replace('\"', '').replace("'", '"')
self.credentials = ast.literal_eval(clean)
def export(self):
"""
:rtype: dict
"""
return {
"created": self.created,
"finished": self.finished,
"status": self.status,
"uploaded": {
"pending": self.pending,
"complete": self.complete,
"failed": self.failed
}
}
def executeAll(self, _set_task_progress):
"""
Sequentially upload images and update job progress
:rtype: object
"""
_set_task_progress(self)
self.status = 'in-progress'
_set_task_progress(self)
while self.size() != 0:
val = self.dequeue()
if self.executeOne(val):
self.pending.remove(val)
self.complete.append(self.url_map[val])
_set_task_progress(self)
else:
self.pending.remove(val)
self.failed.append(val)
_set_task_progress(self)
self.status = 'complete'
self.finished = datetime.datetime.now().isoformat()
_set_task_progress(self)
def executeOne(self, val):
"""
Upload a unique image
:rtype: object
"""
v,url = self.upload_image(path=None, url=val, title=None, description=None, album=None)
if v:
self.url_map.update({val: url})
return True
else:
self.url_map.update({val: url})
return False
def enqueue(self, data):
"""
Adding elements to queue
:rtype: object
"""
# Checking to avoid duplicate entry (not mandatory)
if data not in self.queue:
self.queue.insert(0, data)
return True
return False
def dequeue(self):
"""
        Removing and returning the oldest element from the queue
:rtype: object
"""
if len(self.queue) > 0:
return self.queue.pop()
return ("Queue Empty!")
def size(self):
"""
Getting the size of the queue
:rtype: object
"""
return len(self.queue)
def upload_image(self, path=None, url=None, title=None, description=None,
album=None):
"""
Upload image to the imgur server and returns the new url
:rtype: object
"""
if bool(path) == bool(url):
raise LookupError("Either path or url must be given.")
if path:
with open(path, 'rb') as image_file:
binary_data = image_file.read()
image = b64encode(binary_data)
else:
image = url
payload = {'album_id': "58tq5Nw", 'image': image,
'title': title, 'description': description}
token = ast.literal_eval(str(self.credentials))["access_token"]
authentication = {'Authorization': 'Bearer {0}'.format(token)}
verify = True
resp = requests.post(IMGUR_BASE + "/3/image", payload, headers=authentication, verify=verify)
if 'error' in json.loads(resp.content)["data"]:
return False, json.loads(resp.content)["data"]["error"]
else:
return True, json.loads(resp.content)["data"]["link"]
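A short, hypothetical driver for the Task class above; the URL, the credential string, and the progress callback are illustrative placeholders (executeAll would issue real requests to the Imgur API with a valid token):
def _print_progress(task):
    print(task.export())

job = Task()
job.initialize(['https://example.com/cat.png'],
               "{'access_token': 'YOUR_TOKEN', 'expires_in': 3600}")
job.executeAll(_print_progress)
print(job.url_map)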
|
normal
|
{
"blob_id": "63ee99012089dcb0e5b41860c95e13fff52c6731",
"index": 1546,
"step-1": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n <mask token>\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n <mask token>\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-2": "<mask token>\n\n\nclass Task:\n <mask token>\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n <mask token>\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-3": "<mask token>\nIMGUR_BASE = 'https://api.imgur.com'\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, 
headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-4": "import ast\nimport datetime\nimport json\nfrom base64 import b64encode\nimport requests\nIMGUR_BASE = 'https://api.imgur.com'\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = 'pending'\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\"', '').replace('\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {'created': self.created, 'finished': self.finished,\n 'status': self.status, 'uploaded': {'pending': self.pending,\n 'complete': self.complete, 'failed': self.failed}}\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v, url = self.upload_image(path=None, url=val, title=None,\n description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return 'Queue Empty!'\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=\n None, album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError('Either path or url must be given.')\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': '58tq5Nw', 'image': image, 'title': title,\n 'description': description}\n token = ast.literal_eval(str(self.credentials))['access_token']\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n 
verify = True\n resp = requests.post(IMGUR_BASE + '/3/image', payload, headers=\n authentication, verify=verify)\n if 'error' in json.loads(resp.content)['data']:\n return False, json.loads(resp.content)['data']['error']\n else:\n return True, json.loads(resp.content)['data']['link']\n",
"step-5": "import ast\nimport datetime\nimport json\nfrom base64 import b64encode\nimport requests\n\nIMGUR_BASE = \"https://api.imgur.com\"\n\n\nclass Task:\n \"\"\"\n A class used to represent a job\n ...\n\n Attributes\n ----------\n queue : list\n the list of all urls\n pending : list\n the name of all pending urls\n complete : list\n the name of all completed urls\n failed : list\n the name of all failed urls\n url_map : dict\n a dictionary that maps provided urls with imgur urls\n created:\n date created\n finished:\n date finished\n status:\n the job status\n credentials:\n the access token and other useful objects\n\n \"\"\"\n def __init__(self):\n \"\"\"\n Create the object\n :rtype: object\n \"\"\"\n self.queue = list()\n self.pending = []\n self.complete = []\n self.failed = []\n self.url_map = {}\n self.created = datetime.datetime.now().isoformat()\n self.finished = None\n self.status = \"pending\"\n self.credentials = None\n\n def initialize(self, urls, cred):\n \"\"\"\n Initialize the object with parameters urls and cred\n :param urls : list > the list of urls\n :param cred : dict > the client credentials\n :rtype: object\n \"\"\"\n for i in urls:\n self.enqueue(i)\n self.pending.append(i)\n clean = str(cred).replace('b\\\"', '').replace('\\\"', '').replace(\"'\", '\"')\n self.credentials = ast.literal_eval(clean)\n\n def export(self):\n \"\"\"\n\n :rtype: dict\n \"\"\"\n return {\n \"created\": self.created,\n \"finished\": self.finished,\n \"status\": self.status,\n \"uploaded\": {\n \"pending\": self.pending,\n \"complete\": self.complete,\n \"failed\": self.failed\n }\n }\n\n def executeAll(self, _set_task_progress):\n \"\"\"\n Sequentially upload images and update job progress\n :rtype: object\n \"\"\"\n _set_task_progress(self)\n self.status = 'in-progress'\n _set_task_progress(self)\n while self.size() != 0:\n val = self.dequeue()\n if self.executeOne(val):\n self.pending.remove(val)\n self.complete.append(self.url_map[val])\n _set_task_progress(self)\n else:\n self.pending.remove(val)\n self.failed.append(val)\n _set_task_progress(self)\n self.status = 'complete'\n self.finished = datetime.datetime.now().isoformat()\n _set_task_progress(self)\n\n def executeOne(self, val):\n \"\"\"\n Upload a unique image\n :rtype: object\n \"\"\"\n v,url = self.upload_image(path=None, url=val, title=None, description=None, album=None)\n if v:\n self.url_map.update({val: url})\n return True\n else:\n self.url_map.update({val: url})\n return False\n\n\n def enqueue(self, data):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n # Checking to avoid duplicate entry (not mandatory)\n if data not in self.queue:\n self.queue.insert(0, data)\n return True\n return False\n\n\n def dequeue(self):\n \"\"\"\n Adding elements to queue\n :rtype: object\n \"\"\"\n if len(self.queue) > 0:\n return self.queue.pop()\n return (\"Queue Empty!\")\n\n\n def size(self):\n \"\"\"\n Getting the size of the queue\n :rtype: object\n \"\"\"\n return len(self.queue)\n\n def upload_image(self, path=None, url=None, title=None, description=None,\n album=None):\n \"\"\"\n Upload image to the imgur server and returns the new url\n :rtype: object\n \"\"\"\n if bool(path) == bool(url):\n raise LookupError(\"Either path or url must be given.\")\n if path:\n with open(path, 'rb') as image_file:\n binary_data = image_file.read()\n image = b64encode(binary_data)\n else:\n image = url\n payload = {'album_id': \"58tq5Nw\", 'image': image,\n 'title': title, 'description': description}\n\n token = 
ast.literal_eval(str(self.credentials))[\"access_token\"]\n\n authentication = {'Authorization': 'Bearer {0}'.format(token)}\n verify = True\n resp = requests.post(IMGUR_BASE + \"/3/image\", payload, headers=authentication, verify=verify)\n if 'error' in json.loads(resp.content)[\"data\"]:\n return False, json.loads(resp.content)[\"data\"][\"error\"]\n else:\n return True, json.loads(resp.content)[\"data\"][\"link\"]\n\n\n",
"step-ids": [
8,
9,
12,
13,
14
]
}
|
[
8,
9,
12,
13,
14
] |
<|reserved_special_token_0|>
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RedirectToSiteRootHandler(webapp2.RequestHandler):
<|reserved_special_token_0|>
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RedirectToSiteRootHandler(webapp2.RequestHandler):
def get(self):
self.response.set_status(301)
self.response.headers['Location'] = '/'
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import webapp2
class RedirectToSiteRootHandler(webapp2.RequestHandler):
def get(self):
self.response.set_status(301)
self.response.headers['Location'] = '/'
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
app = webapp2.WSGIApplication([('/blog', RedirectToSiteRootHandler), (
'/blog/', RedirectToSiteRootHandler), ('(.*[^/])',
AppendTrailingSlashHandler)], debug=True)
<|reserved_special_token_1|>
import webapp2
class RedirectToSiteRootHandler(webapp2.RequestHandler):
def get(self):
self.response.set_status(301)
self.response.headers['Location'] = '/'
class AppendTrailingSlashHandler(webapp2.RequestHandler):
def get(self, uri):
self.response.set_status(301)
redirect_uri = uri + '/'
self.response.headers['Location'] = redirect_uri
self.response.headers['Content-Type'] = 'text/plain'
self.response.write(redirect_uri)
app = webapp2.WSGIApplication([
('/blog', RedirectToSiteRootHandler),
('/blog/', RedirectToSiteRootHandler),
('(.*[^/])', AppendTrailingSlashHandler),
], debug=True)
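The route table above redirects /blog (with or without a slash) to the site root and appends a trailing slash to any other path that lacks one. The expected responses, inferred from the regex '(.*[^/])' and the handlers rather than captured from a running app, look like:
#   GET /blog    -> 301, Location: /
#   GET /blog/   -> 301, Location: /
#   GET /about   -> 301, Location: /about/
#   GET /about/  -> not matched here (already ends in '/'), handled elsewhere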
|
flexible
|
{
"blob_id": "064792a6aba96a679bec606a85b19d4925861f7d",
"index": 2493,
"step-1": "<mask token>\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n <mask token>\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n\n def get(self):\n self.response.set_status(301)\n self.response.headers['Location'] = '/'\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\n<mask token>\n",
"step-4": "import webapp2\n\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n\n def get(self):\n self.response.set_status(301)\n self.response.headers['Location'] = '/'\n\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\n\napp = webapp2.WSGIApplication([('/blog', RedirectToSiteRootHandler), (\n '/blog/', RedirectToSiteRootHandler), ('(.*[^/])',\n AppendTrailingSlashHandler)], debug=True)\n",
"step-5": "import webapp2\n\nclass RedirectToSiteRootHandler(webapp2.RequestHandler):\n def get(self):\n self.response.set_status(301)\n self.response.headers['Location'] = '/'\n\nclass AppendTrailingSlashHandler(webapp2.RequestHandler):\n def get(self, uri):\n self.response.set_status(301)\n redirect_uri = uri + '/'\n self.response.headers['Location'] = redirect_uri\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.write(redirect_uri)\n\napp = webapp2.WSGIApplication([\n ('/blog', RedirectToSiteRootHandler),\n ('/blog/', RedirectToSiteRootHandler),\n ('(.*[^/])', AppendTrailingSlashHandler),\n], debug=True)\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
import os
import numpy as np
import scipy as sp
import sys
from sure import that
from itertools import combinations, permutations
input_file = open('input1.txt', 'r')
output_file = open('output1.txt', 'w')
T = int(input_file.readline().rstrip('\n'))
case_num = 1
while case_num - 1 < T:
# Parse data
data = map(int, input_file.readline().rstrip('\n').split(' '))
typed = data[0]
length = data[1]
probs = map(float, input_file.readline().rstrip('\n').split(' '))
assert that(len(probs)).equals(typed)
enter = 1
def product(probs):
if not probs:
return 1
return reduce(lambda x, y: x * y, probs)
def expected_strokes(typed, length):
finish = length - typed + enter
retype = finish + length + enter
correct = product(probs[:typed])
strokes = correct * finish + (1 - correct) * retype
return strokes
def get_min_backspace_stroke_count(typed, length):
min_strokes = 99999999999999
for backspaces in range(typed + 1):
min_strokes = min(backspaces + expected_strokes(typed - backspaces, length), min_strokes)
return min_strokes
result = min(length + 2, get_min_backspace_stroke_count(typed, length))
# Write result
output_file.write('Case #{}: {}\n'.format(case_num, result))
case_num += 1
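A worked example of the expected-stroke calculation above, using illustrative values rather than data from input1.txt:
# typed = 1, length = 2, probs = [0.5], enter = 1
#   keep typing : finish = 2, retype = 5, P(all correct) = 0.5 -> 0.5*2 + 0.5*5 = 3.5
#   1 backspace : 1 + (finish = 3, P(all correct) = 1.0 -> 3.0)                 = 4.0
#   press enter now and retype everything: length + 2                           = 4.0
# result = min(4.0, 3.5, 4.0) = 3.5 expected keystrokes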
|
normal
|
{
"blob_id": "10c8316aee2107dc84ce7c1427dd62f52a2ce697",
"index": 4549,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile case_num - 1 < T:\n data = map(int, input_file.readline().rstrip('\\n').split(' '))\n typed = data[0]\n length = data[1]\n probs = map(float, input_file.readline().rstrip('\\n').split(' '))\n assert that(len(probs)).equals(typed)\n enter = 1\n\n def product(probs):\n if not probs:\n return 1\n return reduce(lambda x, y: x * y, probs)\n\n def expected_strokes(typed, length):\n finish = length - typed + enter\n retype = finish + length + enter\n correct = product(probs[:typed])\n strokes = correct * finish + (1 - correct) * retype\n return strokes\n\n def get_min_backspace_stroke_count(typed, length):\n min_strokes = 99999999999999\n for backspaces in range(typed + 1):\n min_strokes = min(backspaces + expected_strokes(typed -\n backspaces, length), min_strokes)\n return min_strokes\n result = min(length + 2, get_min_backspace_stroke_count(typed, length))\n output_file.write('Case #{}: {}\\n'.format(case_num, result))\n case_num += 1\n",
"step-3": "<mask token>\ninput_file = open('input1.txt', 'r')\noutput_file = open('output1.txt', 'w')\nT = int(input_file.readline().rstrip('\\n'))\ncase_num = 1\nwhile case_num - 1 < T:\n data = map(int, input_file.readline().rstrip('\\n').split(' '))\n typed = data[0]\n length = data[1]\n probs = map(float, input_file.readline().rstrip('\\n').split(' '))\n assert that(len(probs)).equals(typed)\n enter = 1\n\n def product(probs):\n if not probs:\n return 1\n return reduce(lambda x, y: x * y, probs)\n\n def expected_strokes(typed, length):\n finish = length - typed + enter\n retype = finish + length + enter\n correct = product(probs[:typed])\n strokes = correct * finish + (1 - correct) * retype\n return strokes\n\n def get_min_backspace_stroke_count(typed, length):\n min_strokes = 99999999999999\n for backspaces in range(typed + 1):\n min_strokes = min(backspaces + expected_strokes(typed -\n backspaces, length), min_strokes)\n return min_strokes\n result = min(length + 2, get_min_backspace_stroke_count(typed, length))\n output_file.write('Case #{}: {}\\n'.format(case_num, result))\n case_num += 1\n",
"step-4": "import os\nimport numpy as np\nimport scipy as sp\nimport sys\nfrom sure import that\nfrom itertools import combinations, permutations\ninput_file = open('input1.txt', 'r')\noutput_file = open('output1.txt', 'w')\nT = int(input_file.readline().rstrip('\\n'))\ncase_num = 1\nwhile case_num - 1 < T:\n data = map(int, input_file.readline().rstrip('\\n').split(' '))\n typed = data[0]\n length = data[1]\n probs = map(float, input_file.readline().rstrip('\\n').split(' '))\n assert that(len(probs)).equals(typed)\n enter = 1\n\n def product(probs):\n if not probs:\n return 1\n return reduce(lambda x, y: x * y, probs)\n\n def expected_strokes(typed, length):\n finish = length - typed + enter\n retype = finish + length + enter\n correct = product(probs[:typed])\n strokes = correct * finish + (1 - correct) * retype\n return strokes\n\n def get_min_backspace_stroke_count(typed, length):\n min_strokes = 99999999999999\n for backspaces in range(typed + 1):\n min_strokes = min(backspaces + expected_strokes(typed -\n backspaces, length), min_strokes)\n return min_strokes\n result = min(length + 2, get_min_backspace_stroke_count(typed, length))\n output_file.write('Case #{}: {}\\n'.format(case_num, result))\n case_num += 1\n",
"step-5": "import os\nimport numpy as np\nimport scipy as sp\nimport sys\nfrom sure import that\nfrom itertools import combinations, permutations\n\n\ninput_file = open('input1.txt', 'r')\noutput_file = open('output1.txt', 'w')\n\nT = int(input_file.readline().rstrip('\\n'))\ncase_num = 1\nwhile case_num - 1 < T:\n # Parse data\n data = map(int, input_file.readline().rstrip('\\n').split(' '))\n typed = data[0]\n length = data[1]\n probs = map(float, input_file.readline().rstrip('\\n').split(' '))\n assert that(len(probs)).equals(typed)\n\n enter = 1\n\n def product(probs):\n if not probs:\n return 1\n return reduce(lambda x, y: x * y, probs)\n \n def expected_strokes(typed, length):\n finish = length - typed + enter\n retype = finish + length + enter\n correct = product(probs[:typed])\n strokes = correct * finish + (1 - correct) * retype\n return strokes\n\n def get_min_backspace_stroke_count(typed, length):\n min_strokes = 99999999999999\n for backspaces in range(typed + 1):\n min_strokes = min(backspaces + expected_strokes(typed - backspaces, length), min_strokes)\n return min_strokes\n\n result = min(length + 2, get_min_backspace_stroke_count(typed, length))\n\n # Write result\n output_file.write('Case #{}: {}\\n'.format(case_num, result))\n case_num += 1\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 22:11:53 2020
@author: Rick
"""
sum = 0
with open('workRecord.txt') as fp:
    for line in fp.readlines():
        idx = line.rfind('x', len(line) - 8, len(line))
        if idx >= 0:
            sum += float(line.rstrip()[idx + 1:len(line)])
        else:
            sum += 1
print(sum)
print(sum * 3)
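The loop treats a trailing 'x<number>' suffix (searched only within the last eight characters of a line) as a weight and counts every other line as 1. An illustrative workRecord.txt and the resulting output (the file contents are assumptions, not the real log):
# workRecord.txt:
#   fixed login bug x0.5
#   wrote unit tests
#   refactored parser x2
# -> prints 3.5 and then 10.5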
|
normal
|
{
"blob_id": "b838d2230cb3f3270e86807e875df4d3d55438cd",
"index": 8891,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('workRecord.txt') as fp:\n for line in fp.readlines():\n idx = line.rfind('x', len(line) - 8, len(line))\n if idx >= 0:\n sum += float(line.rstrip()[idx + 1:len(line)])\n else:\n sum += 1\nprint(sum)\nprint(sum * 3)\n",
"step-3": "<mask token>\nsum = 0\nwith open('workRecord.txt') as fp:\n for line in fp.readlines():\n idx = line.rfind('x', len(line) - 8, len(line))\n if idx >= 0:\n sum += float(line.rstrip()[idx + 1:len(line)])\n else:\n sum += 1\nprint(sum)\nprint(sum * 3)\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 8 22:11:53 2020\n\n@author: Rick\n\"\"\"\nsum= 0;\nwith open('workRecord.txt') as fp:\n for line in fp.readlines():\n idx= line.rfind('x',len(line)-8,len(line))\n if idx>=0:\n sum+= float(line.rstrip()[idx+1:len(line)])\n else:\n sum+= 1\nprint(sum)\nprint(sum*3)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def process_image(image):
draw_image = np.copy(image)
window_list_32 = slide_window(image, y_start_stop=y_start_stop_32,
xy_window=(32, 32), xy_overlap=xy_overlap_32)
window_list_64 = slide_window(image, y_start_stop=y_start_stop_64,
xy_window=(64, 64), xy_overlap=xy_overlap_64)
window_list_80 = slide_window(image, y_start_stop=y_start_stop_64,
xy_window=(80, 80), xy_overlap=xy_overlap_80)
window_list_128 = slide_window(image, y_start_stop=y_start_stop_128,
xy_window=(128, 128), xy_overlap=xy_overlap_128)
window_detected_list = []
    all_windows = (window_list_32 + window_list_64 + window_list_80 +
        window_list_128)
    for window in all_windows:
        window_img = cv2.resize(image[window[0][1]:window[1][1],
            window[0][0]:window[1][0]], (64, 64))
        window_features = single_extract_features(window_img, color_space=
            color_space, spatial_size=spatial_size, hist_bins=hist_bins,
            orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
            cell_per_block, hog_channel=hog_channel, spatial_feat=
            spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
        reshaped = window_features.reshape(1, -1)
        window_features_scaled = svc_scaler.transform(reshaped)
        prediction = svc_model.predict(window_features_scaled)
        if prediction == 1:
            window_detected_list.append(window)
    heat = np.zeros_like(image[:, :, 0]).astype(float)
heat = add_heat(heat, window_detected_list)
heat = apply_threshold(heat, 4)
heatmap = np.clip(heat, 0, 255)
global heatmap_glob
    if heatmap_glob is None:
heatmap_glob = heatmap
new_frame_factor = 0.3
heatmap = new_frame_factor * heatmap + (1 - new_frame_factor
) * heatmap_glob
heatmap = apply_threshold(heatmap, 4)
heatmap_glob = heatmap
labels = label(heatmap)
bboxes = get_bboxes_heatmap(labels)
valid_bboxes = []
for bbox in bboxes:
potential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]
:bbox[1][0]], (64, 64))
prediction = cnn_model.predict(potential_bbox[None, :, :, :])
print(prediction)
if prediction > 0.5:
valid_bboxes.append(bbox)
draw_img = draw_bboxes(np.copy(image), valid_bboxes)
img_drawn = draw_boxes(draw_image, window_detected_list)
draw_img = draw_labeled_bboxes(np.copy(image), labels)
return draw_img
<|reserved_special_token_1|>
<|reserved_special_token_0|>
svc_model = pickle.load(open('svm_model.p', 'rb'))
svc_scaler = pickle.load(open('svm_scaler.p', 'rb'))
cnn_model = load_model('model.h5')
color_space = 'RGB'
orient = 9
pix_per_cell = 8
cell_per_block = 2
hog_channel = 'gray'
spatial_size = 32, 32
hist_bins = 32
spatial_feat = True
hist_feat = True
hog_feat = True
y_start_stop_32 = [400, 450]
y_start_stop_64 = [400, 600]
y_start_stop_80 = [400, None]
y_start_stop_128 = [400, None]
xy_overlap_32 = [0.75, 0.75]
xy_overlap_64 = [0.75, 0.75]
xy_overlap_80 = [0.5, 0.5]
xy_overlap_128 = [0.5, 0.5]
heatmap_glob = None
def process_image(image):
draw_image = np.copy(image)
window_list_32 = slide_window(image, y_start_stop=y_start_stop_32,
xy_window=(32, 32), xy_overlap=xy_overlap_32)
window_list_64 = slide_window(image, y_start_stop=y_start_stop_64,
xy_window=(64, 64), xy_overlap=xy_overlap_64)
window_list_80 = slide_window(image, y_start_stop=y_start_stop_64,
xy_window=(80, 80), xy_overlap=xy_overlap_80)
window_list_128 = slide_window(image, y_start_stop=y_start_stop_128,
xy_window=(128, 128), xy_overlap=xy_overlap_128)
window_detected_list = []
for window in window_list_32:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
for window in window_list_64:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
for window in window_list_80:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
for window in window_list_128:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
    heat = np.zeros_like(image[:, :, 0]).astype(float)
heat = add_heat(heat, window_detected_list)
heat = apply_threshold(heat, 4)
heatmap = np.clip(heat, 0, 255)
global heatmap_glob
    if heatmap_glob is None:
heatmap_glob = heatmap
new_frame_factor = 0.3
heatmap = new_frame_factor * heatmap + (1 - new_frame_factor
) * heatmap_glob
heatmap = apply_threshold(heatmap, 4)
heatmap_glob = heatmap
labels = label(heatmap)
bboxes = get_bboxes_heatmap(labels)
valid_bboxes = []
for bbox in bboxes:
potential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]
:bbox[1][0]], (64, 64))
prediction = cnn_model.predict(potential_bbox[None, :, :, :])
print(prediction)
if prediction > 0.5:
valid_bboxes.append(bbox)
draw_img = draw_bboxes(np.copy(image), valid_bboxes)
img_drawn = draw_boxes(draw_image, window_detected_list)
draw_img = draw_labeled_bboxes(np.copy(image), labels)
return draw_img
<|reserved_special_token_1|>
from sklearn.svm import SVC
from helper_functions import *
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import glob
import pickle
from scipy.ndimage.measurements import label
from keras.models import load_model
svc_model = pickle.load(open('svm_model.p', 'rb'))
svc_scaler = pickle.load(open('svm_scaler.p', 'rb'))
cnn_model = load_model('model.h5')
color_space = 'RGB'
orient = 9
pix_per_cell = 8
cell_per_block = 2
hog_channel = 'gray'
spatial_size = 32, 32
hist_bins = 32
spatial_feat = True
hist_feat = True
hog_feat = True
y_start_stop_32 = [400, 450]
y_start_stop_64 = [400, 600]
y_start_stop_80 = [400, None]
y_start_stop_128 = [400, None]
xy_overlap_32 = [0.75, 0.75]
xy_overlap_64 = [0.75, 0.75]
xy_overlap_80 = [0.5, 0.5]
xy_overlap_128 = [0.5, 0.5]
heatmap_glob = None
def process_image(image):
draw_image = np.copy(image)
window_list_32 = slide_window(image, y_start_stop=y_start_stop_32,
xy_window=(32, 32), xy_overlap=xy_overlap_32)
window_list_64 = slide_window(image, y_start_stop=y_start_stop_64,
xy_window=(64, 64), xy_overlap=xy_overlap_64)
window_list_80 = slide_window(image, y_start_stop=y_start_stop_64,
xy_window=(80, 80), xy_overlap=xy_overlap_80)
window_list_128 = slide_window(image, y_start_stop=y_start_stop_128,
xy_window=(128, 128), xy_overlap=xy_overlap_128)
window_detected_list = []
for window in window_list_32:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
for window in window_list_64:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
for window in window_list_80:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
for window in window_list_128:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][
0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=
color_space, spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell, cell_per_block=
cell_per_block, hog_channel=hog_channel, spatial_feat=
spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
    heat = np.zeros_like(image[:, :, 0]).astype(float)
heat = add_heat(heat, window_detected_list)
heat = apply_threshold(heat, 4)
heatmap = np.clip(heat, 0, 255)
global heatmap_glob
    if heatmap_glob is None:
heatmap_glob = heatmap
new_frame_factor = 0.3
heatmap = new_frame_factor * heatmap + (1 - new_frame_factor
) * heatmap_glob
heatmap = apply_threshold(heatmap, 4)
heatmap_glob = heatmap
labels = label(heatmap)
bboxes = get_bboxes_heatmap(labels)
valid_bboxes = []
for bbox in bboxes:
potential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]
:bbox[1][0]], (64, 64))
prediction = cnn_model.predict(potential_bbox[None, :, :, :])
print(prediction)
if prediction > 0.5:
valid_bboxes.append(bbox)
draw_img = draw_bboxes(np.copy(image), valid_bboxes)
img_drawn = draw_boxes(draw_image, window_detected_list)
draw_img = draw_labeled_bboxes(np.copy(image), labels)
return draw_img
<|reserved_special_token_1|>
from sklearn.svm import SVC
from helper_functions import *
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import glob
import pickle
from scipy.ndimage.measurements import label
from keras.models import load_model
svc_model = pickle.load(open("svm_model.p", "rb"))
svc_scaler = pickle.load(open("svm_scaler.p", "rb"))
cnn_model = load_model("model.h5")
# Parameters
color_space = 'RGB' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb
orient = 9 # HOG orientations
pix_per_cell = 8 # HOG pixels per cell
cell_per_block = 2 # HOG cells per block
hog_channel = "gray" # Can be 0, 1, 2, or "ALL"
spatial_size = (32, 32) # Spatial binning dimensions
hist_bins = 32 # Number of histogram bins
spatial_feat = True # Spatial features on or off
hist_feat = True # Histogram features on or off
hog_feat = True # HOG features on or off
y_start_stop_32 = [400, 450] # Min and max in y to search in slide_window()
y_start_stop_64 = [400, 600]
y_start_stop_80 = [400, None]
y_start_stop_128 = [400, None]
xy_overlap_32 = [0.75,0.75]
xy_overlap_64 = [0.75,0.75]
xy_overlap_80 = [0.5,0.5]
xy_overlap_128 = [0.5, 0.5]
# placeholder to save frames from video
heatmap_glob = None
def process_image(image):
# image copied to be drawn into
draw_image = np.copy(image)
# create the sliding windows for individual image
window_list_32 = slide_window(image, y_start_stop=y_start_stop_32, xy_window = (32,32), xy_overlap=xy_overlap_32)
window_list_64 = slide_window(image, y_start_stop=y_start_stop_64, xy_window = (64,64), xy_overlap=xy_overlap_64)
window_list_80 = slide_window(image, y_start_stop=y_start_stop_64, xy_window = (80,80), xy_overlap=xy_overlap_80)
window_list_128 = slide_window(image, y_start_stop=y_start_stop_128, xy_window = (128,128), xy_overlap=xy_overlap_128)
# placeholder for detected window
window_detected_list = []
# iterate through the windows and detect vehicle
for window in window_list_32:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
# Reshape and apply scaling
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
# Predict using your classifier
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
# iterate through the windows and detect vehicle
for window in window_list_64:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
# Reshape and apply scaling
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
# Predict using your classifier
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
# iterate through the windows and detect vehicle
for window in window_list_80:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
# Reshape and apply scaling
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
# Predict using your classifier
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
# iterate through the windows and detect vehicle
for window in window_list_128:
window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
window_features = single_extract_features(window_img, color_space=color_space,
spatial_size=spatial_size, hist_bins=hist_bins,
orient=orient, pix_per_cell=pix_per_cell,
cell_per_block=cell_per_block,
hog_channel=hog_channel, spatial_feat=spatial_feat,
hist_feat=hist_feat, hog_feat=hog_feat)
# Reshape and apply scaling
reshaped = window_features.reshape(1, -1)
window_features_scaled = svc_scaler.transform(reshaped)
# Predict using your classifier
prediction = svc_model.predict(window_features_scaled)
if prediction == 1:
window_detected_list.append(window)
# Create a copy placeholder for heatmap
	heat = np.zeros_like(image[:,:,0]).astype(float)
# Add heat to each box in window list
heat = add_heat(heat, window_detected_list)
# Apply threshold to help remove false positives
heat = apply_threshold(heat, 4)
# Visualize the heatmap when displaying
heatmap = np.clip(heat, 0, 255)
# Check if this is first init, initialise global heatmap
global heatmap_glob
	if (heatmap_glob is None):
heatmap_glob = heatmap
new_frame_factor = 0.3
heatmap = new_frame_factor * heatmap + (1 - new_frame_factor) * heatmap_glob
heatmap = apply_threshold(heatmap, 4)
#update heatmap glob
heatmap_glob = heatmap
# Find final boxes from heatmap using label function
labels = label(heatmap)
# Get bounding box of the heatmap labels to get the image to feed into our cnn
bboxes = get_bboxes_heatmap(labels)
# Placeholder for CNN classification
valid_bboxes = []
# Feed each bbox image into CNN
for bbox in bboxes:
potential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]], (64, 64))
prediction = cnn_model.predict(potential_bbox[None,:,:,:])
print(prediction)
if prediction > 0.5:
valid_bboxes.append(bbox)
# Draw box for validated bbox by CNN
draw_img = draw_bboxes(np.copy(image), valid_bboxes)
# draw boxes for detected window
img_drawn = draw_boxes(draw_image, window_detected_list)
draw_img = draw_labeled_bboxes(np.copy(image), labels)
return draw_img
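
A self-contained sketch of the heat-map / labelling step the pipeline above relies on; add_heat, apply_threshold and the box drawing come from the unshown helper_functions module, so the two helpers below are assumptions about their behaviour and the toy boxes are invented.

import numpy as np
from scipy.ndimage import label

def add_heat(heatmap, bbox_list):
    # +1 inside every detection window, so overlapping windows accumulate heat.
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap

def apply_threshold(heatmap, threshold):
    # Zero out pixels not covered by enough windows (false-positive rejection).
    heatmap[heatmap <= threshold] = 0
    return heatmap

heat = np.zeros((10, 10), dtype=float)
# Two overlapping fake detections and one isolated false positive.
heat = add_heat(heat, [((1, 1), (5, 5)), ((2, 2), (6, 6)), ((8, 8), (10, 10))])
heat = apply_threshold(heat, 1)
labelled, n_blobs = label(heat)
print(n_blobs)  # 1 -- only the region covered by both windows survives
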
|
flexible
|
{
"blob_id": "89db4431a252d024381713eb7ad86346814fcbe4",
"index": 7955,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_image(image):\n draw_image = np.copy(image)\n window_list_32 = slide_window(image, y_start_stop=y_start_stop_32,\n xy_window=(32, 32), xy_overlap=xy_overlap_32)\n window_list_64 = slide_window(image, y_start_stop=y_start_stop_64,\n xy_window=(64, 64), xy_overlap=xy_overlap_64)\n window_list_80 = slide_window(image, y_start_stop=y_start_stop_64,\n xy_window=(80, 80), xy_overlap=xy_overlap_80)\n window_list_128 = slide_window(image, y_start_stop=y_start_stop_128,\n xy_window=(128, 128), xy_overlap=xy_overlap_128)\n window_detected_list = []\n for window in window_list_32:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_64:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_80:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_128:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n heat = np.zeros_like(image[:, :, 0]).astype(np.float)\n heat = add_heat(heat, window_detected_list)\n heat = apply_threshold(heat, 4)\n heatmap = np.clip(heat, 0, 255)\n global heatmap_glob\n if heatmap_glob == None:\n heatmap_glob = heatmap\n new_frame_factor = 0.3\n heatmap = new_frame_factor * heatmap + (1 - new_frame_factor\n ) * 
heatmap_glob\n heatmap = apply_threshold(heatmap, 4)\n heatmap_glob = heatmap\n labels = label(heatmap)\n bboxes = get_bboxes_heatmap(labels)\n valid_bboxes = []\n for bbox in bboxes:\n potential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]\n :bbox[1][0]], (64, 64))\n prediction = cnn_model.predict(potential_bbox[None, :, :, :])\n print(prediction)\n if prediction > 0.5:\n valid_bboxes.append(bbox)\n draw_img = draw_bboxes(np.copy(image), valid_bboxes)\n img_drawn = draw_boxes(draw_image, window_detected_list)\n draw_img = draw_labeled_bboxes(np.copy(image), labels)\n return draw_img\n",
"step-3": "<mask token>\nsvc_model = pickle.load(open('svm_model.p', 'rb'))\nsvc_scaler = pickle.load(open('svm_scaler.p', 'rb'))\ncnn_model = load_model('model.h5')\ncolor_space = 'RGB'\norient = 9\npix_per_cell = 8\ncell_per_block = 2\nhog_channel = 'gray'\nspatial_size = 32, 32\nhist_bins = 32\nspatial_feat = True\nhist_feat = True\nhog_feat = True\ny_start_stop_32 = [400, 450]\ny_start_stop_64 = [400, 600]\ny_start_stop_80 = [400, None]\ny_start_stop_128 = [400, None]\nxy_overlap_32 = [0.75, 0.75]\nxy_overlap_64 = [0.75, 0.75]\nxy_overlap_80 = [0.5, 0.5]\nxy_overlap_128 = [0.5, 0.5]\nheatmap_glob = None\n\n\ndef process_image(image):\n draw_image = np.copy(image)\n window_list_32 = slide_window(image, y_start_stop=y_start_stop_32,\n xy_window=(32, 32), xy_overlap=xy_overlap_32)\n window_list_64 = slide_window(image, y_start_stop=y_start_stop_64,\n xy_window=(64, 64), xy_overlap=xy_overlap_64)\n window_list_80 = slide_window(image, y_start_stop=y_start_stop_64,\n xy_window=(80, 80), xy_overlap=xy_overlap_80)\n window_list_128 = slide_window(image, y_start_stop=y_start_stop_128,\n xy_window=(128, 128), xy_overlap=xy_overlap_128)\n window_detected_list = []\n for window in window_list_32:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_64:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_80:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_128:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, 
hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n heat = np.zeros_like(image[:, :, 0]).astype(np.float)\n heat = add_heat(heat, window_detected_list)\n heat = apply_threshold(heat, 4)\n heatmap = np.clip(heat, 0, 255)\n global heatmap_glob\n if heatmap_glob == None:\n heatmap_glob = heatmap\n new_frame_factor = 0.3\n heatmap = new_frame_factor * heatmap + (1 - new_frame_factor\n ) * heatmap_glob\n heatmap = apply_threshold(heatmap, 4)\n heatmap_glob = heatmap\n labels = label(heatmap)\n bboxes = get_bboxes_heatmap(labels)\n valid_bboxes = []\n for bbox in bboxes:\n potential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]\n :bbox[1][0]], (64, 64))\n prediction = cnn_model.predict(potential_bbox[None, :, :, :])\n print(prediction)\n if prediction > 0.5:\n valid_bboxes.append(bbox)\n draw_img = draw_bboxes(np.copy(image), valid_bboxes)\n img_drawn = draw_boxes(draw_image, window_detected_list)\n draw_img = draw_labeled_bboxes(np.copy(image), labels)\n return draw_img\n",
"step-4": "from sklearn.svm import SVC\nfrom helper_functions import *\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport glob\nimport pickle\nfrom scipy.ndimage.measurements import label\nfrom keras.models import load_model\nsvc_model = pickle.load(open('svm_model.p', 'rb'))\nsvc_scaler = pickle.load(open('svm_scaler.p', 'rb'))\ncnn_model = load_model('model.h5')\ncolor_space = 'RGB'\norient = 9\npix_per_cell = 8\ncell_per_block = 2\nhog_channel = 'gray'\nspatial_size = 32, 32\nhist_bins = 32\nspatial_feat = True\nhist_feat = True\nhog_feat = True\ny_start_stop_32 = [400, 450]\ny_start_stop_64 = [400, 600]\ny_start_stop_80 = [400, None]\ny_start_stop_128 = [400, None]\nxy_overlap_32 = [0.75, 0.75]\nxy_overlap_64 = [0.75, 0.75]\nxy_overlap_80 = [0.5, 0.5]\nxy_overlap_128 = [0.5, 0.5]\nheatmap_glob = None\n\n\ndef process_image(image):\n draw_image = np.copy(image)\n window_list_32 = slide_window(image, y_start_stop=y_start_stop_32,\n xy_window=(32, 32), xy_overlap=xy_overlap_32)\n window_list_64 = slide_window(image, y_start_stop=y_start_stop_64,\n xy_window=(64, 64), xy_overlap=xy_overlap_64)\n window_list_80 = slide_window(image, y_start_stop=y_start_stop_64,\n xy_window=(80, 80), xy_overlap=xy_overlap_80)\n window_list_128 = slide_window(image, y_start_stop=y_start_stop_128,\n xy_window=(128, 128), xy_overlap=xy_overlap_128)\n window_detected_list = []\n for window in window_list_32:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_64:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n for window in window_list_80:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n 
for window in window_list_128:\n window_img = cv2.resize(image[window[0][1]:window[1][1], window[0][\n 0]:window[1][0]], (64, 64))\n window_features = single_extract_features(window_img, color_space=\n color_space, spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell, cell_per_block=\n cell_per_block, hog_channel=hog_channel, spatial_feat=\n spatial_feat, hist_feat=hist_feat, hog_feat=hog_feat)\n reshaped = window_features.reshape(1, -1)\n window_features_scaled = svc_scaler.transform(reshaped)\n prediction = svc_model.predict(window_features_scaled)\n if prediction == 1:\n window_detected_list.append(window)\n heat = np.zeros_like(image[:, :, 0]).astype(np.float)\n heat = add_heat(heat, window_detected_list)\n heat = apply_threshold(heat, 4)\n heatmap = np.clip(heat, 0, 255)\n global heatmap_glob\n if heatmap_glob == None:\n heatmap_glob = heatmap\n new_frame_factor = 0.3\n heatmap = new_frame_factor * heatmap + (1 - new_frame_factor\n ) * heatmap_glob\n heatmap = apply_threshold(heatmap, 4)\n heatmap_glob = heatmap\n labels = label(heatmap)\n bboxes = get_bboxes_heatmap(labels)\n valid_bboxes = []\n for bbox in bboxes:\n potential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]\n :bbox[1][0]], (64, 64))\n prediction = cnn_model.predict(potential_bbox[None, :, :, :])\n print(prediction)\n if prediction > 0.5:\n valid_bboxes.append(bbox)\n draw_img = draw_bboxes(np.copy(image), valid_bboxes)\n img_drawn = draw_boxes(draw_image, window_detected_list)\n draw_img = draw_labeled_bboxes(np.copy(image), labels)\n return draw_img\n",
"step-5": "from sklearn.svm import SVC\nfrom helper_functions import *\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport glob\nimport pickle\nfrom scipy.ndimage.measurements import label\nfrom keras.models import load_model\n\nsvc_model = pickle.load(open(\"svm_model.p\", \"rb\"))\nsvc_scaler = pickle.load(open(\"svm_scaler.p\", \"rb\"))\ncnn_model = load_model(\"model.h5\")\n\n# Parameters\ncolor_space = 'RGB' # Can be RGB, HSV, LUV, HLS, YUV, YCrCb\norient = 9 # HOG orientations\npix_per_cell = 8 # HOG pixels per cell\ncell_per_block = 2 # HOG cells per block\nhog_channel = \"gray\" # Can be 0, 1, 2, or \"ALL\"\nspatial_size = (32, 32) # Spatial binning dimensions\nhist_bins = 32 # Number of histogram bins\nspatial_feat = True # Spatial features on or off\nhist_feat = True # Histogram features on or off\nhog_feat = True # HOG features on or off\ny_start_stop_32 = [400, 450] # Min and max in y to search in slide_window()\ny_start_stop_64 = [400, 600]\ny_start_stop_80 = [400, None]\ny_start_stop_128 = [400, None]\nxy_overlap_32 = [0.75,0.75]\nxy_overlap_64 = [0.75,0.75]\nxy_overlap_80 = [0.5,0.5]\nxy_overlap_128 = [0.5, 0.5]\n\n# placeholder to save frames from video\nheatmap_glob = None\n\ndef process_image(image):\n\n\t# image copied to be drawn into\n\tdraw_image = np.copy(image)\n\n\t# create the sliding windows for individual image\n\twindow_list_32 = slide_window(image, y_start_stop=y_start_stop_32, xy_window = (32,32), xy_overlap=xy_overlap_32)\n\twindow_list_64 = slide_window(image, y_start_stop=y_start_stop_64, xy_window = (64,64), xy_overlap=xy_overlap_64)\n\twindow_list_80 = slide_window(image, y_start_stop=y_start_stop_64, xy_window = (80,80), xy_overlap=xy_overlap_80)\n\twindow_list_128 = slide_window(image, y_start_stop=y_start_stop_128, xy_window = (128,128), xy_overlap=xy_overlap_128)\n\n\t# placeholder for detected window\n\twindow_detected_list = []\n\n\t# iterate through the windows and detect vehicle\n\tfor window in window_list_32:\n\t\twindow_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n\t\twindow_features = single_extract_features(window_img, color_space=color_space, \n\t\t\tspatial_size=spatial_size, hist_bins=hist_bins, \n\t\t\torient=orient, pix_per_cell=pix_per_cell, \n\t\t\tcell_per_block=cell_per_block, \n\t\t\thog_channel=hog_channel, spatial_feat=spatial_feat, \n\t\t\thist_feat=hist_feat, hog_feat=hog_feat)\n\t\t# Reshape and apply scaling\n\t\treshaped = window_features.reshape(1, -1)\n\t\twindow_features_scaled = svc_scaler.transform(reshaped)\n\t\t# Predict using your classifier\n\t\tprediction = svc_model.predict(window_features_scaled)\n\t\tif prediction == 1:\n\t\t\twindow_detected_list.append(window)\n\n\t# iterate through the windows and detect vehicle\n\tfor window in window_list_64:\n\t\twindow_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n\t\twindow_features = single_extract_features(window_img, color_space=color_space, \n\t\t\tspatial_size=spatial_size, hist_bins=hist_bins, \n\t\t\torient=orient, pix_per_cell=pix_per_cell, \n\t\t\tcell_per_block=cell_per_block, \n\t\t\thog_channel=hog_channel, spatial_feat=spatial_feat, \n\t\t\thist_feat=hist_feat, hog_feat=hog_feat)\n\t\t# Reshape and apply scaling\n\t\treshaped = window_features.reshape(1, 
-1)\n\t\twindow_features_scaled = svc_scaler.transform(reshaped)\n\t\t# Predict using your classifier\n\t\tprediction = svc_model.predict(window_features_scaled)\n\t\tif prediction == 1:\n\t\t\twindow_detected_list.append(window)\n\n\t\t# iterate through the windows and detect vehicle\n\tfor window in window_list_80:\n\t\twindow_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n\t\twindow_features = single_extract_features(window_img, color_space=color_space, \n\t\t\tspatial_size=spatial_size, hist_bins=hist_bins, \n\t\t\torient=orient, pix_per_cell=pix_per_cell, \n\t\t\tcell_per_block=cell_per_block, \n\t\t\thog_channel=hog_channel, spatial_feat=spatial_feat, \n\t\t\thist_feat=hist_feat, hog_feat=hog_feat)\n\t\t# Reshape and apply scaling\n\t\treshaped = window_features.reshape(1, -1)\n\t\twindow_features_scaled = svc_scaler.transform(reshaped)\n\t\t# Predict using your classifier\n\t\tprediction = svc_model.predict(window_features_scaled)\n\t\tif prediction == 1:\n\t\t\twindow_detected_list.append(window)\n\n\t# iterate through the windows and detect vehicle\n\tfor window in window_list_128:\n\t\twindow_img = cv2.resize(image[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64)) \n\t\twindow_features = single_extract_features(window_img, color_space=color_space, \n\t\t\tspatial_size=spatial_size, hist_bins=hist_bins, \n\t\t\torient=orient, pix_per_cell=pix_per_cell, \n\t\t\tcell_per_block=cell_per_block, \n\t\t\thog_channel=hog_channel, spatial_feat=spatial_feat, \n\t\t\thist_feat=hist_feat, hog_feat=hog_feat)\n\t\t# Reshape and apply scaling\n\t\treshaped = window_features.reshape(1, -1)\n\t\twindow_features_scaled = svc_scaler.transform(reshaped)\n\t\t# Predict using your classifier\n\t\tprediction = svc_model.predict(window_features_scaled)\n\t\tif prediction == 1:\n\t\t\twindow_detected_list.append(window)\n\n\t# Create a copy placeholder for heatmap\n\theat = np.zeros_like(image[:,:,0]).astype(np.float)\n\n\t# Add heat to each box in window list\n\theat = add_heat(heat, window_detected_list)\n\t \n\t# Apply threshold to help remove false positives\n\theat = apply_threshold(heat, 4)\n\n\t# Visualize the heatmap when displaying \n\theatmap = np.clip(heat, 0, 255)\n\n\t# Check if this is first init, initialise global heatmap\n\tglobal heatmap_glob\n\tif (heatmap_glob == None):\n\t\theatmap_glob = heatmap\n\n\tnew_frame_factor = 0.3\n\theatmap = new_frame_factor * heatmap + (1 - new_frame_factor) * heatmap_glob\n\theatmap = apply_threshold(heatmap, 4)\n\n\t#update heatmap glob\n\theatmap_glob = heatmap\n\n\t# Find final boxes from heatmap using label function\n\tlabels = label(heatmap)\n\n\t# Get bounding box of the heatmap labels to get the image to feed into our cnn\n\tbboxes = get_bboxes_heatmap(labels)\n\t# Placeholder for CNN classification\n\tvalid_bboxes = []\n\n\t# Feed each bbox image into CNN\n\tfor bbox in bboxes:\n\t\tpotential_bbox = cv2.resize(image[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]], (64, 64)) \n\t\tprediction = cnn_model.predict(potential_bbox[None,:,:,:])\n\t\tprint(prediction)\n\t\tif prediction > 0.5:\n\t\t\tvalid_bboxes.append(bbox)\n\n\t# Draw box for validated bbox by CNN\n\tdraw_img = draw_bboxes(np.copy(image), valid_bboxes)\n\n\t# draw boxes for detected window\n\timg_drawn = draw_boxes(draw_image, window_detected_list)\n\n\tdraw_img = draw_labeled_bboxes(np.copy(image), labels)\n\n\treturn draw_img\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while i < len(questions):
user_answers = input('{}...'.format(questions[i]))
if user_answers.capitalize() == answers[i]:
count_answers = count_answers + 1
i += 1
print('Было задано {i} вопросов. Правильных ответов - {count_answers}!'.
format(i=i, count_answers=count_answers))
<|reserved_special_token_1|>
questions = ('Какой язык мы учим?',
'Какой тип данных имеет целая переменная?',
'Какой тип данных имеет вещественная переменная?',
'Какой тип данных имеет логическая переменная?',
'Какой тип данных имеет символьная переменная?')
answers = 'Python', 'Integer', 'Float', 'Bool', 'String'
i = 0
count_answers = 0
while i < len(questions):
user_answers = input('{}...'.format(questions[i]))
if user_answers.capitalize() == answers[i]:
count_answers = count_answers + 1
i += 1
print('Было задано {i} вопросов. Правильных ответов - {count_answers}!'.
format(i=i, count_answers=count_answers))
<|reserved_special_token_1|>
questions = ('Какой язык мы учим?', 'Какой тип данных имеет целая переменная?', 'Какой тип данных имеет вещественная переменная?', 'Какой тип данных имеет логическая переменная?', 'Какой тип данных имеет символьная переменная?')
answers = ('Python', 'Integer', 'Float', 'Bool', 'String')
i = 0
count_answers = 0
while i < len(questions):
user_answers = input('{}...'.format(questions[i]))
if user_answers.capitalize() == answers[i]:
count_answers = count_answers + 1
i += 1
print('Было задано {i} вопросов. Правильных ответов - {count_answers}!'.format(i = i, count_answers = count_answers))
|
flexible
|
{
"blob_id": "dd936839d71b97b3a21115498092d8984de0e3f1",
"index": 7445,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile i < len(questions):\n user_answers = input('{}...'.format(questions[i]))\n if user_answers.capitalize() == answers[i]:\n count_answers = count_answers + 1\n i += 1\nprint('Было задано {i} вопросов. Правильных ответов - {count_answers}!'.\n format(i=i, count_answers=count_answers))\n",
"step-3": "questions = ('Какой язык мы учим?',\n 'Какой тип данных имеет целая переменная?',\n 'Какой тип данных имеет вещественная переменная?',\n 'Какой тип данных имеет логическая переменная?',\n 'Какой тип данных имеет символьная переменная?')\nanswers = 'Python', 'Integer', 'Float', 'Bool', 'String'\ni = 0\ncount_answers = 0\nwhile i < len(questions):\n user_answers = input('{}...'.format(questions[i]))\n if user_answers.capitalize() == answers[i]:\n count_answers = count_answers + 1\n i += 1\nprint('Было задано {i} вопросов. Правильных ответов - {count_answers}!'.\n format(i=i, count_answers=count_answers))\n",
"step-4": "questions = ('Какой язык мы учим?', 'Какой тип данных имеет целая переменная?', 'Какой тип данных имеет вещественная переменная?', 'Какой тип данных имеет логическая переменная?', 'Какой тип данных имеет символьная переменная?')\nanswers = ('Python', 'Integer', 'Float', 'Bool', 'String')\ni = 0\ncount_answers = 0\nwhile i < len(questions):\n user_answers = input('{}...'.format(questions[i]))\n if user_answers.capitalize() == answers[i]:\n count_answers = count_answers + 1\n i += 1\nprint('Было задано {i} вопросов. Правильных ответов - {count_answers}!'.format(i = i, count_answers = count_answers))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(s2)
s1.add(100)
print(s1.pop())
print(10 in s1)
print(10 not in s1)
<|reserved_special_token_1|>
s1 = {10, 20, 30, 60, 70, 80, 90}
s2 = set()
print(s2)
s1.add(100)
print(s1.pop())
print(10 in s1)
print(10 not in s1)
|
flexible
|
{
"blob_id": "3747e45dcba548060f25bd6d6f0e0e96091ca3df",
"index": 2358,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(s2)\ns1.add(100)\nprint(s1.pop())\nprint(10 in s1)\nprint(10 not in s1)\n",
"step-3": "s1 = {10, 20, 30, 60, 70, 80, 90}\ns2 = set()\nprint(s2)\ns1.add(100)\nprint(s1.pop())\nprint(10 in s1)\nprint(10 not in s1)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def parse_text(request):
if request.method == 'POST':
text = request.POST.get('the_text')
phone_number_list = []
matches = re.findall(
'\\(?(\\d{3})\\)?[\\.\\-]?\\s*(\\d{3})\\s*[\\.\\-]?\\s*(\\d{4})',
text)
for match in matches:
phone_number_list.append('({}) {}-{}'.format(match[0], match[1],
match[2]))
response_data = {'phone_number_list': phone_number_list}
return JsonResponse(response_data)
else:
form = TextForm()
return render(request, 'phone_number_parser/index.html', {'form': form}
)
<|reserved_special_token_1|>
from django.http import JsonResponse
from django.shortcuts import render
from phone_number_parser.forms import TextForm
import re
def parse_text(request):
if request.method == 'POST':
text = request.POST.get('the_text')
phone_number_list = []
matches = re.findall(
'\\(?(\\d{3})\\)?[\\.\\-]?\\s*(\\d{3})\\s*[\\.\\-]?\\s*(\\d{4})',
text)
for match in matches:
phone_number_list.append('({}) {}-{}'.format(match[0], match[1],
match[2]))
response_data = {'phone_number_list': phone_number_list}
return JsonResponse(response_data)
else:
form = TextForm()
return render(request, 'phone_number_parser/index.html', {'form': form}
)
<|reserved_special_token_1|>
from django.http import JsonResponse
from django.shortcuts import render
from phone_number_parser.forms import TextForm
import re
def parse_text(request):
###########################################################################
#
# Parse Text is the lone view for this project. A GET request renders a
# form with one textarea field. A POST of this form passes the text via an
# ajax call in the field 'the_text'. The text is parsed using REGEX for
# phone numbers and passed back as a JSON object.
# See main.js for the ajax request and success callback function.
#
###########################################################################
if request.method == 'POST':
text = request.POST.get('the_text')
phone_number_list = []
matches = re.findall(r'\(?(\d{3})\)?[\.\-]?\s*(\d{3})\s*[\.\-]?\s*(\d{4})', text)
for match in matches:
phone_number_list.append('({}) {}-{}'.format(match[0], match[1], match[2]))
response_data = {'phone_number_list': phone_number_list}
return JsonResponse(response_data)
else:
form = TextForm()
return render(request, 'phone_number_parser/index.html', {'form': form})
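
A quick standalone check of the regular expression used above, run on an invented sample string:

import re

sample = 'Call (213) 555-0134 or 213.555.0188; extension 42 should not match.'
matches = re.findall(r'\(?(\d{3})\)?[\.\-]?\s*(\d{3})\s*[\.\-]?\s*(\d{4})', sample)
print(['({}) {}-{}'.format(*m) for m in matches])
# ['(213) 555-0134', '(213) 555-0188']
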
|
flexible
|
{
"blob_id": "d27a7ca04e12d50aca5a9f9db199102dbeb4e9f1",
"index": 7678,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_text(request):\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(\n '\\\\(?(\\\\d{3})\\\\)?[\\\\.\\\\-]?\\\\s*(\\\\d{3})\\\\s*[\\\\.\\\\-]?\\\\s*(\\\\d{4})',\n text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1],\n match[2]))\n response_data = {'phone_number_list': phone_number_list}\n return JsonResponse(response_data)\n else:\n form = TextForm()\n return render(request, 'phone_number_parser/index.html', {'form': form}\n )\n",
"step-3": "from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom phone_number_parser.forms import TextForm\nimport re\n\n\ndef parse_text(request):\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(\n '\\\\(?(\\\\d{3})\\\\)?[\\\\.\\\\-]?\\\\s*(\\\\d{3})\\\\s*[\\\\.\\\\-]?\\\\s*(\\\\d{4})',\n text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1],\n match[2]))\n response_data = {'phone_number_list': phone_number_list}\n return JsonResponse(response_data)\n else:\n form = TextForm()\n return render(request, 'phone_number_parser/index.html', {'form': form}\n )\n",
"step-4": "from django.http import JsonResponse\nfrom django.shortcuts import render\nfrom phone_number_parser.forms import TextForm\nimport re\n\n\ndef parse_text(request):\n ###########################################################################\n #\n # Parse Text is the lone view for this project. A GET request renders a\n # form with one textarea field. A POST of this form passes the text via an\n # ajax call in the field 'the_text'. The text is parsed using REGEX for\n # phone numbers and passed back as a JSON object.\n # See main.js for the ajax request and success callback function.\n #\n ###########################################################################\n\n if request.method == 'POST':\n text = request.POST.get('the_text')\n phone_number_list = []\n matches = re.findall(r'\\(?(\\d{3})\\)?[\\.\\-]?\\s*(\\d{3})\\s*[\\.\\-]?\\s*(\\d{4})', text)\n for match in matches:\n phone_number_list.append('({}) {}-{}'.format(match[0], match[1], match[2]))\n\n response_data = {'phone_number_list': phone_number_list}\n\n return JsonResponse(response_data)\n\n else:\n form = TextForm()\n\n return render(request, 'phone_number_parser/index.html', {'form': form})\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Retriever(base):
def _retrieveResultsFor(self, computation):
director = self.director
db = director.clerk.db
orm = director.clerk.orm
analysisObj = orm.record2object(computation)
self.declareProgress(0.1, 'look up job')
job = computation.getJob(db)
self.declareProgress(0.2, 'check if results exist')
self._check_results_sanity(expected_results=[analysisObj._dosFile],
job=job)
self.declareProgress(0.4,
'create a DOS data object to store the result')
from vnf.dom.material_simulations.PhononDOS import PhononDOSTable
dos = self._make_result_holder(computation, PhononDOSTable)
db.updateRecord(dos)
self.declareProgress(0.5, 'get result from server')
self._save_result(computation, job, 'data.idf', dos, 'data.idf')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Retriever(base):
def _retrieveResultsFor(self, computation):
director = self.director
db = director.clerk.db
orm = director.clerk.orm
analysisObj = orm.record2object(computation)
self.declareProgress(0.1, 'look up job')
job = computation.getJob(db)
self.declareProgress(0.2, 'check if results exist')
self._check_results_sanity(expected_results=[analysisObj._dosFile],
job=job)
self.declareProgress(0.4,
'create a DOS data object to store the result')
from vnf.dom.material_simulations.PhononDOS import PhononDOSTable
dos = self._make_result_holder(computation, PhononDOSTable)
db.updateRecord(dos)
self.declareProgress(0.5, 'get result from server')
self._save_result(computation, job, 'data.idf', dos, 'data.idf')
def retriever():
return Retriever('mddoscalc')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Retriever(base):
def _retrieveResultsFor(self, computation):
director = self.director
db = director.clerk.db
orm = director.clerk.orm
analysisObj = orm.record2object(computation)
self.declareProgress(0.1, 'look up job')
job = computation.getJob(db)
self.declareProgress(0.2, 'check if results exist')
self._check_results_sanity(expected_results=[analysisObj._dosFile],
job=job)
self.declareProgress(0.4,
'create a DOS data object to store the result')
from vnf.dom.material_simulations.PhononDOS import PhononDOSTable
dos = self._make_result_holder(computation, PhononDOSTable)
db.updateRecord(dos)
self.declareProgress(0.5, 'get result from server')
self._save_result(computation, job, 'data.idf', dos, 'data.idf')
def retriever():
return Retriever('mddoscalc')
__id__ = '$Id$'
<|reserved_special_token_1|>
from vnf.components.ComputationResultRetriever import ComputationResultRetriever as base
class Retriever(base):
def _retrieveResultsFor(self, computation):
director = self.director
db = director.clerk.db
orm = director.clerk.orm
analysisObj = orm.record2object(computation)
self.declareProgress(0.1, 'look up job')
job = computation.getJob(db)
self.declareProgress(0.2, 'check if results exist')
self._check_results_sanity(expected_results=[analysisObj._dosFile],
job=job)
self.declareProgress(0.4,
'create a DOS data object to store the result')
from vnf.dom.material_simulations.PhononDOS import PhononDOSTable
dos = self._make_result_holder(computation, PhononDOSTable)
db.updateRecord(dos)
self.declareProgress(0.5, 'get result from server')
self._save_result(computation, job, 'data.idf', dos, 'data.idf')
def retriever():
return Retriever('mddoscalc')
__id__ = '$Id$'
<|reserved_special_token_1|>
# -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# California Institute of Technology
# (C) 2008 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from vnf.components.ComputationResultRetriever import ComputationResultRetriever as base
class Retriever(base):
def _retrieveResultsFor(self, computation):
director = self.director
db = director.clerk.db
orm = director.clerk.orm
analysisObj = orm.record2object(computation)
# must have a job
self.declareProgress(0.1, 'look up job')
job = computation.getJob(db)
# check result is available
self.declareProgress(0.2, 'check if results exist')
self._check_results_sanity(expected_results=[analysisObj._dosFile], job=job)
# create a dos record to save dos
self.declareProgress(0.4, 'create a DOS data object to store the result')
from vnf.dom.material_simulations.PhononDOS import PhononDOSTable
dos = self._make_result_holder(computation, PhononDOSTable)
#dos.matter = computation.matter #analysis calc does not have matter ref!
db.updateRecord(dos)
# save the result from job to dos
#dosObj = orm.record2object(dos)
#server = self.db.dereference(job.server)
#is_available = self.dds.is_available
#dosObj.read(analysisObj.dosFile)
#from idf import DOS
#DOS.write(dosObj.e, dosObj.i, 'data.idf')
self.declareProgress(0.5, 'get result from server')
#self._save_result(computation, job, analysisObj.dosFile, dos, 'data.txt')
self._save_result(computation, job, 'data.idf', dos, 'data.idf')
def retriever():
return Retriever('mddoscalc')
# version
__id__ = "$Id$"
# End of file
|
flexible
|
{
"blob_id": "721e014bc5bf53a39556e31f281b77b90508cf12",
"index": 7138,
"step-1": "<mask token>\n\n\nclass Retriever(base):\n\n def _retrieveResultsFor(self, computation):\n director = self.director\n db = director.clerk.db\n orm = director.clerk.orm\n analysisObj = orm.record2object(computation)\n self.declareProgress(0.1, 'look up job')\n job = computation.getJob(db)\n self.declareProgress(0.2, 'check if results exist')\n self._check_results_sanity(expected_results=[analysisObj._dosFile],\n job=job)\n self.declareProgress(0.4,\n 'create a DOS data object to store the result')\n from vnf.dom.material_simulations.PhononDOS import PhononDOSTable\n dos = self._make_result_holder(computation, PhononDOSTable)\n db.updateRecord(dos)\n self.declareProgress(0.5, 'get result from server')\n self._save_result(computation, job, 'data.idf', dos, 'data.idf')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Retriever(base):\n\n def _retrieveResultsFor(self, computation):\n director = self.director\n db = director.clerk.db\n orm = director.clerk.orm\n analysisObj = orm.record2object(computation)\n self.declareProgress(0.1, 'look up job')\n job = computation.getJob(db)\n self.declareProgress(0.2, 'check if results exist')\n self._check_results_sanity(expected_results=[analysisObj._dosFile],\n job=job)\n self.declareProgress(0.4,\n 'create a DOS data object to store the result')\n from vnf.dom.material_simulations.PhononDOS import PhononDOSTable\n dos = self._make_result_holder(computation, PhononDOSTable)\n db.updateRecord(dos)\n self.declareProgress(0.5, 'get result from server')\n self._save_result(computation, job, 'data.idf', dos, 'data.idf')\n\n\ndef retriever():\n return Retriever('mddoscalc')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Retriever(base):\n\n def _retrieveResultsFor(self, computation):\n director = self.director\n db = director.clerk.db\n orm = director.clerk.orm\n analysisObj = orm.record2object(computation)\n self.declareProgress(0.1, 'look up job')\n job = computation.getJob(db)\n self.declareProgress(0.2, 'check if results exist')\n self._check_results_sanity(expected_results=[analysisObj._dosFile],\n job=job)\n self.declareProgress(0.4,\n 'create a DOS data object to store the result')\n from vnf.dom.material_simulations.PhononDOS import PhononDOSTable\n dos = self._make_result_holder(computation, PhononDOSTable)\n db.updateRecord(dos)\n self.declareProgress(0.5, 'get result from server')\n self._save_result(computation, job, 'data.idf', dos, 'data.idf')\n\n\ndef retriever():\n return Retriever('mddoscalc')\n\n\n__id__ = '$Id$'\n",
"step-4": "from vnf.components.ComputationResultRetriever import ComputationResultRetriever as base\n\n\nclass Retriever(base):\n\n def _retrieveResultsFor(self, computation):\n director = self.director\n db = director.clerk.db\n orm = director.clerk.orm\n analysisObj = orm.record2object(computation)\n self.declareProgress(0.1, 'look up job')\n job = computation.getJob(db)\n self.declareProgress(0.2, 'check if results exist')\n self._check_results_sanity(expected_results=[analysisObj._dosFile],\n job=job)\n self.declareProgress(0.4,\n 'create a DOS data object to store the result')\n from vnf.dom.material_simulations.PhononDOS import PhononDOSTable\n dos = self._make_result_holder(computation, PhononDOSTable)\n db.updateRecord(dos)\n self.declareProgress(0.5, 'get result from server')\n self._save_result(computation, job, 'data.idf', dos, 'data.idf')\n\n\ndef retriever():\n return Retriever('mddoscalc')\n\n\n__id__ = '$Id$'\n",
"step-5": "# -*- Python -*-\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# California Institute of Technology\n# (C) 2008 All Rights Reserved\n#\n# {LicenseText}\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\n\nfrom vnf.components.ComputationResultRetriever import ComputationResultRetriever as base\nclass Retriever(base):\n\n def _retrieveResultsFor(self, computation):\n director = self.director\n db = director.clerk.db\n orm = director.clerk.orm\n analysisObj = orm.record2object(computation)\n\n # must have a job\n self.declareProgress(0.1, 'look up job')\n job = computation.getJob(db)\n\n # check result is available\n self.declareProgress(0.2, 'check if results exist')\n self._check_results_sanity(expected_results=[analysisObj._dosFile], job=job)\n\n # create a dos record to save dos\n self.declareProgress(0.4, 'create a DOS data object to store the result')\n from vnf.dom.material_simulations.PhononDOS import PhononDOSTable\n dos = self._make_result_holder(computation, PhononDOSTable)\n #dos.matter = computation.matter #analysis calc does not have matter ref!\n db.updateRecord(dos)\n\n # save the result from job to dos\n \n #dosObj = orm.record2object(dos)\n #server = self.db.dereference(job.server)\n #is_available = self.dds.is_available\n #dosObj.read(analysisObj.dosFile)\n #from idf import DOS\n #DOS.write(dosObj.e, dosObj.i, 'data.idf')\n self.declareProgress(0.5, 'get result from server')\n #self._save_result(computation, job, analysisObj.dosFile, dos, 'data.txt')\n self._save_result(computation, job, 'data.idf', dos, 'data.idf')\n\ndef retriever():\n return Retriever('mddoscalc')\n\n\n# version\n__id__ = \"$Id$\"\n\n# End of file \n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_configuration(host):
sshd = host.file('/etc/ssh/sshd_config')
assert sshd.contains(r'^PermitRootLogin no$')
assert sshd.contains(r'^X11Forwarding no$')
assert sshd.contains(r'^UsePAM yes$')
assert sshd.contains(r'\sPermitTTY no$')
ssh = host.file('/etc/ssh/ssh_config')
assert ssh.contains(r'^User test$')
assert ssh.contains(r'^Host \*$')
assert ssh.contains(r'\sPort 23$')
def test_service(host):
ssh = host.service('ssh')
assert ssh.is_running
assert ssh.is_enabled
assert host.socket('tcp://0.0.0.0:22').is_listening
|
normal
|
{
"blob_id": "2345d1f72fb695ccec5af0ed157c0606f197009c",
"index": 3398,
"step-1": "<mask token>\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-3": "<mask token>\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[\n 'MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-4": "import os\nimport testinfra.utils.ansible_runner\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(os.environ[\n 'MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains('^PermitRootLogin no$')\n assert sshd.contains('^X11Forwarding no$')\n assert sshd.contains('^UsePAM yes$')\n assert sshd.contains('\\\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains('^User test$')\n assert ssh.contains('^Host \\\\*$')\n assert ssh.contains('\\\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-5": "import os\n\nimport testinfra.utils.ansible_runner\n\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')\n\n\ndef test_configuration(host):\n sshd = host.file('/etc/ssh/sshd_config')\n assert sshd.contains(r'^PermitRootLogin no$')\n assert sshd.contains(r'^X11Forwarding no$')\n assert sshd.contains(r'^UsePAM yes$')\n assert sshd.contains(r'\\sPermitTTY no$')\n ssh = host.file('/etc/ssh/ssh_config')\n assert ssh.contains(r'^User test$')\n assert ssh.contains(r'^Host \\*$')\n assert ssh.contains(r'\\sPort 23$')\n\n\ndef test_service(host):\n ssh = host.service('ssh')\n assert ssh.is_running\n assert ssh.is_enabled\n assert host.socket('tcp://0.0.0.0:22').is_listening\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Generated by Django 3.0.7 on 2020-07-03 11:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('descriptor', '0007_auto_20200702_1653'),
]
operations = [
migrations.CreateModel(
name='Parameter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=40, verbose_name='name')),
('format', models.CharField(choices=[('AN', 'alphanumeric'), ('NB', 'number'), ('AR', 'array')], max_length=2, verbose_name='format')),
('size', models.CharField(max_length=20, verbose_name='size')),
('required', models.BooleanField(default=True, verbose_name='required')),
('domain_rules', models.CharField(max_length=120, verbose_name='domain rules')),
],
options={
'verbose_name': 'parameter',
'verbose_name_plural': 'parameter',
},
),
migrations.AlterField(
model_name='service',
name='http_method',
field=models.CharField(choices=[('GET', 'GET'), ('POST', 'POST'), ('PUT', 'PUT'), ('DELETE', 'DELETE'), ('PATCH', 'PATCH')], max_length=6, verbose_name='method'),
),
migrations.AlterField(
model_name='service',
name='status',
field=models.CharField(choices=[('ST', 'under study'), ('DV', 'under development'), ('HM', 'under test'), ('DP', 'deployed')], max_length=2),
),
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parameter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='descriptor.Parameter')),
('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='descriptor.Service')),
],
options={
'verbose_name': 'request',
'verbose_name_plural': 'requests',
},
),
]
|
normal
|
{
"blob_id": "3a5d55ea5a2f4f6cf7aaf55055593db9f8bb3562",
"index": 6308,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('descriptor', '0007_auto_20200702_1653')]\n operations = [migrations.CreateModel(name='Parameter', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=40,\n verbose_name='name')), ('format', models.CharField(choices=[('AN',\n 'alphanumeric'), ('NB', 'number'), ('AR', 'array')], max_length=2,\n verbose_name='format')), ('size', models.CharField(max_length=20,\n verbose_name='size')), ('required', models.BooleanField(default=\n True, verbose_name='required')), ('domain_rules', models.CharField(\n max_length=120, verbose_name='domain rules'))], options={\n 'verbose_name': 'parameter', 'verbose_name_plural': 'parameter'}),\n migrations.AlterField(model_name='service', name='http_method',\n field=models.CharField(choices=[('GET', 'GET'), ('POST', 'POST'), (\n 'PUT', 'PUT'), ('DELETE', 'DELETE'), ('PATCH', 'PATCH')],\n max_length=6, verbose_name='method')), migrations.AlterField(\n model_name='service', name='status', field=models.CharField(choices\n =[('ST', 'under study'), ('DV', 'under development'), ('HM',\n 'under test'), ('DP', 'deployed')], max_length=2)), migrations.\n CreateModel(name='Request', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('parameter', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='descriptor.Parameter')), ('service', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'descriptor.Service'))], options={'verbose_name': 'request',\n 'verbose_name_plural': 'requests'})]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('descriptor', '0007_auto_20200702_1653')]\n operations = [migrations.CreateModel(name='Parameter', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=40,\n verbose_name='name')), ('format', models.CharField(choices=[('AN',\n 'alphanumeric'), ('NB', 'number'), ('AR', 'array')], max_length=2,\n verbose_name='format')), ('size', models.CharField(max_length=20,\n verbose_name='size')), ('required', models.BooleanField(default=\n True, verbose_name='required')), ('domain_rules', models.CharField(\n max_length=120, verbose_name='domain rules'))], options={\n 'verbose_name': 'parameter', 'verbose_name_plural': 'parameter'}),\n migrations.AlterField(model_name='service', name='http_method',\n field=models.CharField(choices=[('GET', 'GET'), ('POST', 'POST'), (\n 'PUT', 'PUT'), ('DELETE', 'DELETE'), ('PATCH', 'PATCH')],\n max_length=6, verbose_name='method')), migrations.AlterField(\n model_name='service', name='status', field=models.CharField(choices\n =[('ST', 'under study'), ('DV', 'under development'), ('HM',\n 'under test'), ('DP', 'deployed')], max_length=2)), migrations.\n CreateModel(name='Request', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('parameter', models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='descriptor.Parameter')), ('service', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'descriptor.Service'))], options={'verbose_name': 'request',\n 'verbose_name_plural': 'requests'})]\n",
"step-5": "# Generated by Django 3.0.7 on 2020-07-03 11:08\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('descriptor', '0007_auto_20200702_1653'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Parameter',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=40, verbose_name='name')),\n ('format', models.CharField(choices=[('AN', 'alphanumeric'), ('NB', 'number'), ('AR', 'array')], max_length=2, verbose_name='format')),\n ('size', models.CharField(max_length=20, verbose_name='size')),\n ('required', models.BooleanField(default=True, verbose_name='required')),\n ('domain_rules', models.CharField(max_length=120, verbose_name='domain rules')),\n ],\n options={\n 'verbose_name': 'parameter',\n 'verbose_name_plural': 'parameter',\n },\n ),\n migrations.AlterField(\n model_name='service',\n name='http_method',\n field=models.CharField(choices=[('GET', 'GET'), ('POST', 'POST'), ('PUT', 'PUT'), ('DELETE', 'DELETE'), ('PATCH', 'PATCH')], max_length=6, verbose_name='method'),\n ),\n migrations.AlterField(\n model_name='service',\n name='status',\n field=models.CharField(choices=[('ST', 'under study'), ('DV', 'under development'), ('HM', 'under test'), ('DP', 'deployed')], max_length=2),\n ),\n migrations.CreateModel(\n name='Request',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('parameter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='descriptor.Parameter')),\n ('service', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='descriptor.Service')),\n ],\n options={\n 'verbose_name': 'request',\n 'verbose_name_plural': 'requests',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
myDict = {'Friends': ['AP', 'Soham', 'Baba'], 'Likes': ['Math',
'Programming'], 'languages': ['C++', 'Python', 'Java']}
myInt = 123
myFloat = 12.3333
myName = 'Somesh Thakur'
|
normal
|
{
"blob_id": "345967e2aeafda6ce30cbbbbacf976c97b17def7",
"index": 515,
"step-1": "<mask token>\n",
"step-2": "myDict = {'Friends': ['AP', 'Soham', 'Baba'], 'Likes': ['Math',\n 'Programming'], 'languages': ['C++', 'Python', 'Java']}\nmyInt = 123\nmyFloat = 12.3333\nmyName = 'Somesh Thakur'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
"""
The MIT License (MIT)
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Literal, TypedDict
from .member import Member
from .snowflake import Snowflake
from .user import User
ScheduledEventStatus = Literal[1, 2, 3, 4]
ScheduledEventLocationType = Literal[1, 2, 3]
ScheduledEventPrivacyLevel = Literal[2]
class ScheduledEvent(TypedDict):
id: Snowflake
guild_id: Snowflake
channel_id: Snowflake
creator_id: Snowflake
name: str
description: str
image: str | None
scheduled_start_time: str
scheduled_end_time: str | None
privacy_level: ScheduledEventPrivacyLevel
status: ScheduledEventStatus
entity_type: ScheduledEventLocationType
entity_id: Snowflake
entity_metadata: ScheduledEventEntityMetadata
creator: User
user_count: int | None
class ScheduledEventEntityMetadata(TypedDict):
location: str
class ScheduledEventSubscriber(TypedDict):
guild_scheduled_event_id: Snowflake
user: User
member: Member | None
|
normal
|
{
"blob_id": "a73dcfc21c31d4e984db39c072d11cb9a9c3d5e5",
"index": 2470,
"step-1": "<mask token>\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-2": "<mask token>\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-3": "<mask token>\nScheduledEventStatus = Literal[1, 2, 3, 4]\nScheduledEventLocationType = Literal[1, 2, 3]\nScheduledEventPrivacyLevel = Literal[2]\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-4": "<mask token>\nfrom __future__ import annotations\nfrom typing import Literal, TypedDict\nfrom .member import Member\nfrom .snowflake import Snowflake\nfrom .user import User\nScheduledEventStatus = Literal[1, 2, 3, 4]\nScheduledEventLocationType = Literal[1, 2, 3]\nScheduledEventPrivacyLevel = Literal[2]\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-5": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import Literal, TypedDict\n\nfrom .member import Member\nfrom .snowflake import Snowflake\nfrom .user import User\n\nScheduledEventStatus = Literal[1, 2, 3, 4]\nScheduledEventLocationType = Literal[1, 2, 3]\nScheduledEventPrivacyLevel = Literal[2]\n\n\nclass ScheduledEvent(TypedDict):\n id: Snowflake\n guild_id: Snowflake\n channel_id: Snowflake\n creator_id: Snowflake\n name: str\n description: str\n image: str | None\n scheduled_start_time: str\n scheduled_end_time: str | None\n privacy_level: ScheduledEventPrivacyLevel\n status: ScheduledEventStatus\n entity_type: ScheduledEventLocationType\n entity_id: Snowflake\n entity_metadata: ScheduledEventEntityMetadata\n creator: User\n user_count: int | None\n\n\nclass ScheduledEventEntityMetadata(TypedDict):\n location: str\n\n\nclass ScheduledEventSubscriber(TypedDict):\n guild_scheduled_event_id: Snowflake\n user: User\n member: Member | None\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class User(object):
def __init__(self, meta):
meta.update({'groups': meta.get('groups', []) + [meta['username']]})
self.meta = meta
@property
def username(self):
return self.meta['username']
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def is_root(self):
return self.username == 'root'
def own(self, node):
if self.is_root:
return True
return node.owner == self.username
def can_read(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_readable:
return True
if node.group in self.groups and node.group_readable:
return True
if node.other_readable:
return True
return False
def can_write(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_writable:
return True
if node.group in self.groups and node.group_writable:
return True
if node.other_writable:
return True
return False
def can_create(self, node):
return self.can_write(node.parent)
def can_remove(self, node):
return self.can_write(node.parent)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(object):
def __init__(self, meta):
meta.update({'groups': meta.get('groups', []) + [meta['username']]})
self.meta = meta
@property
def username(self):
return self.meta['username']
@property
def groups(self):
return self.meta['groups']
@property
def home_path(self):
return os.path.join('/home', self.username)
@property
def is_root(self):
return self.username == 'root'
def own(self, node):
if self.is_root:
return True
return node.owner == self.username
def can_read(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_readable:
return True
if node.group in self.groups and node.group_readable:
return True
if node.other_readable:
return True
return False
def can_write(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_writable:
return True
if node.group in self.groups and node.group_writable:
return True
if node.other_writable:
return True
return False
def can_create(self, node):
return self.can_write(node.parent)
def can_remove(self, node):
return self.can_write(node.parent)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(object):
def __init__(self, meta):
meta.update({'groups': meta.get('groups', []) + [meta['username']]})
self.meta = meta
@property
def username(self):
return self.meta['username']
@property
def groups(self):
return self.meta['groups']
@property
def home_path(self):
return os.path.join('/home', self.username)
@property
def is_root(self):
return self.username == 'root'
def own(self, node):
if self.is_root:
return True
return node.owner == self.username
def can_read(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_readable:
return True
if node.group in self.groups and node.group_readable:
return True
if node.other_readable:
return True
return False
def can_write(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_writable:
return True
if node.group in self.groups and node.group_writable:
return True
if node.other_writable:
return True
return False
def can_create(self, node):
return self.can_write(node.parent)
def can_remove(self, node):
return self.can_write(node.parent)
<|reserved_special_token_0|>
def __repr__(self):
return repr(self.meta)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class User(object):
def __init__(self, meta):
meta.update({'groups': meta.get('groups', []) + [meta['username']]})
self.meta = meta
@property
def username(self):
return self.meta['username']
@property
def groups(self):
return self.meta['groups']
@property
def home_path(self):
return os.path.join('/home', self.username)
@property
def is_root(self):
return self.username == 'root'
def own(self, node):
if self.is_root:
return True
return node.owner == self.username
def can_read(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_readable:
return True
if node.group in self.groups and node.group_readable:
return True
if node.other_readable:
return True
return False
def can_write(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_writable:
return True
if node.group in self.groups and node.group_writable:
return True
if node.other_writable:
return True
return False
def can_create(self, node):
return self.can_write(node.parent)
def can_remove(self, node):
return self.can_write(node.parent)
def __getitem__(self, key):
return self.meta.__getitem__(key)
def __repr__(self):
return repr(self.meta)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import os
class User(object):
def __init__(self, meta):
meta.update({
'groups': meta.get('groups', []) + [meta['username']]
})
self.meta = meta
@property
def username(self):
return self.meta['username']
@property
def groups(self):
return self.meta['groups']
@property
def home_path(self):
return os.path.join('/home', self.username)
@property
def is_root(self):
return self.username == 'root'
def own(self, node):
if self.is_root:
return True
return node.owner == self.username
def can_read(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_readable:
return True
if node.group in self.groups and node.group_readable:
return True
if node.other_readable:
return True
return False
def can_write(self, node):
if self.is_root:
return True
if self.own(node) and node.owner_writable:
return True
if node.group in self.groups and node.group_writable:
return True
if node.other_writable:
return True
return False
def can_create(self, node):
return self.can_write(node.parent)
def can_remove(self, node):
return self.can_write(node.parent)
def __getitem__(self, key):
return self.meta.__getitem__(key)
def __repr__(self):
return repr(self.meta)
root_user = User({'username': 'root'})
|
flexible
|
{
"blob_id": "aa47b7c74b9b6b8a7f014de4bd58236edeba485d",
"index": 5971,
"step-1": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n <mask token>\n <mask token>\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n <mask token>\n\n def __repr__(self):\n return repr(self.meta)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({'groups': meta.get('groups', []) + [meta['username']]})\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n\n def __getitem__(self, key):\n return self.meta.__getitem__(key)\n\n def __repr__(self):\n return repr(self.meta)\n\n\n<mask token>\n",
"step-5": "import os\n\n\nclass User(object):\n\n def __init__(self, meta):\n meta.update({\n 'groups': meta.get('groups', []) + [meta['username']]\n })\n self.meta = meta\n\n @property\n def username(self):\n return self.meta['username']\n\n @property\n def groups(self):\n return self.meta['groups']\n\n @property\n def home_path(self):\n return os.path.join('/home', self.username)\n\n @property\n def is_root(self):\n return self.username == 'root'\n\n def own(self, node):\n if self.is_root:\n return True\n return node.owner == self.username\n\n def can_read(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_readable:\n return True\n if node.group in self.groups and node.group_readable:\n return True\n if node.other_readable:\n return True\n return False\n\n def can_write(self, node):\n if self.is_root:\n return True\n if self.own(node) and node.owner_writable:\n return True\n if node.group in self.groups and node.group_writable:\n return True\n if node.other_writable:\n return True\n return False\n\n def can_create(self, node):\n return self.can_write(node.parent)\n\n def can_remove(self, node):\n return self.can_write(node.parent)\n\n def __getitem__(self, key):\n return self.meta.__getitem__(key)\n\n def __repr__(self):\n return repr(self.meta)\n\n\nroot_user = User({'username': 'root'})\n",
"step-ids": [
9,
11,
12,
13,
16
]
}
|
[
9,
11,
12,
13,
16
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def small_enough(array, limit):
counter = ''
for arr in array:
if arr <= limit:
counter += 'True,'
else:
counter += 'False,'
if 'False' in counter:
return False
else:
return True
<|reserved_special_token_1|>
#Small enough? - Beginner
# You will be given an array and a limit value.
# You must check that all values in the array are
# below or equal to the limit value. If they are,
# return true. Else, return false.
def small_enough(array, limit):
counter = ""
for arr in array:
if arr <= limit:
counter += "True,"
else:
counter += "False,"
if "False" in counter:
return False
else:
return True
|
flexible
|
{
"blob_id": "117b340b13b9b1c53d3df1646cd5924f0118ab5d",
"index": 5512,
"step-1": "<mask token>\n",
"step-2": "def small_enough(array, limit):\n counter = ''\n for arr in array:\n if arr <= limit:\n counter += 'True,'\n else:\n counter += 'False,'\n if 'False' in counter:\n return False\n else:\n return True\n",
"step-3": "#Small enough? - Beginner\n# You will be given an array and a limit value. \n# You must check that all values in the array are \n# below or equal to the limit value. If they are, \n# return true. Else, return false.\n\ndef small_enough(array, limit):\n counter = \"\"\n for arr in array:\n if arr <= limit:\n counter += \"True,\"\n else:\n counter += \"False,\"\n\n if \"False\" in counter:\n return False\n else:\n return True",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Solution(object):
def minimumTotal(self, triangle):
"""
:type triangle: List[List[int]]
:rtype: int
"""
t = triangle
if len(t) == 1:
return t[0][0]
ret = [0] * len(t)
ret[0] = t[0][0]
for i in range(1, len(t)):
for j in range(0, i + 1):
if j == 0:
old_v = ret[j]
ret[j] += t[i][j]
elif j == i:
ret[j] = old_v + t[i][j]
else:
val = min(old_v + t[i][j], ret[j] + t[i][j])
old_v = ret[j]
ret[j] = val
return min(ret)
|
normal
|
{
"blob_id": "84515ef6879b54b333f9afd48c6c4b7c43ff6957",
"index": 1068,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def minimumTotal(self, triangle):\n \"\"\"\n :type triangle: List[List[int]]\n :rtype: int\n \"\"\"\n t = triangle\n if len(t) == 1:\n return t[0][0]\n ret = [0] * len(t)\n ret[0] = t[0][0]\n for i in range(1, len(t)):\n for j in range(0, i + 1):\n if j == 0:\n old_v = ret[j]\n ret[j] += t[i][j]\n elif j == i:\n ret[j] = old_v + t[i][j]\n else:\n val = min(old_v + t[i][j], ret[j] + t[i][j])\n old_v = ret[j]\n ret[j] = val\n return min(ret)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
class Solution:
def search(self, nums: List[int], target: int) -> int:
n = len(nums)
left, right = 0, n-1
found = False
res = None
while left <= right:
mid = left + (right - left) // 2
if nums[mid] == target:
found = True
res = mid
break
elif nums[mid] >= nums[0]:
if target < nums[mid] and target >= nums[0]:
right = mid - 1
else:
left = mid + 1
# nums[mid] > target
elif nums[mid] < nums[0]:
if target > nums[mid] and target <= nums[-1]:
left = mid + 1
else:
right = mid - 1
if found:
print("res is: ", res)
return res
else:
print("res is: ", -1)
return -1
"""
https://leetcode.cn/submissions/detail/320442719/
执行用时:
36 ms
, 在所有 Python3 提交中击败了
73.39%
的用户
内存消耗:
15.2 MB
, 在所有 Python3 提交中击败了
62.74%
的用户
通过测试用例:
195 / 195
"""
|
normal
|
{
"blob_id": "1fe6fab717a77f13ddf7059ef0a5aaef217f0fb0",
"index": 5525,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def search(self, nums: List[int], target: int) ->int:\n n = len(nums)\n left, right = 0, n - 1\n found = False\n res = None\n while left <= right:\n mid = left + (right - left) // 2\n if nums[mid] == target:\n found = True\n res = mid\n break\n elif nums[mid] >= nums[0]:\n if target < nums[mid] and target >= nums[0]:\n right = mid - 1\n else:\n left = mid + 1\n elif nums[mid] < nums[0]:\n if target > nums[mid] and target <= nums[-1]:\n left = mid + 1\n else:\n right = mid - 1\n if found:\n print('res is: ', res)\n return res\n else:\n print('res is: ', -1)\n return -1\n\n\n<mask token>\n",
"step-4": "class Solution:\n def search(self, nums: List[int], target: int) -> int:\n \n n = len(nums)\n left, right = 0, n-1\n found = False\n res = None\n\n while left <= right:\n mid = left + (right - left) // 2\n if nums[mid] == target:\n found = True\n res = mid\n break\n elif nums[mid] >= nums[0]:\n if target < nums[mid] and target >= nums[0]:\n right = mid - 1\n else:\n left = mid + 1\n # nums[mid] > target\n elif nums[mid] < nums[0]:\n if target > nums[mid] and target <= nums[-1]:\n left = mid + 1\n else:\n right = mid - 1\n if found:\n print(\"res is: \", res)\n return res\n else:\n print(\"res is: \", -1)\n return -1\n \n\"\"\"\nhttps://leetcode.cn/submissions/detail/320442719/\n\n执行用时:\n36 ms\n, 在所有 Python3 提交中击败了\n73.39%\n的用户\n内存消耗:\n15.2 MB\n, 在所有 Python3 提交中击败了\n62.74%\n的用户\n通过测试用例:\n195 / 195\n\"\"\"\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
Note : If file is already in pdf format than file will directly save in converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
Note : If file is already in pdf format than file will directly save in converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
Note : If file is already in pdf format than file will directly save in converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
<|reserved_special_token_1|>
from flask import Flask, jsonify, request, render_template
from werkzeug import secure_filename
import os
from utils import allowed_file, convert_html_to_pdf, convert_doc_to_pdf
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'failed', 'message':
'Something Went Wrong !!'})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
Note : If file is already in pdf format than file will directly save in converted_files
folder without other action.
"""
if request.method == 'POST':
try:
files = request.files.getlist('file')
print('files', files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join(
'static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == 'docx' or extension[-1] == 'doc':
if convert_doc_to_pdf(file_path):
print('File Converted to PDF Successfully !!')
else:
raise Exception('Something Went Wrong !')
return jsonify({'status': 'success', 'message':
'File Uploaded Successfully !!'})
else:
return jsonify({'status': 'failed', 'message':
'Format Not Allowed !!'})
else:
return jsonify({'status': 'failed'})
except Exception as e:
print('Exception Occurred', e)
return jsonify({'status': 'exception', 'message':
'Something Went Wrong !!'})
else:
return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
<|reserved_special_token_1|>
from flask import Flask, jsonify, request, render_template
from werkzeug import secure_filename
import os
from utils import allowed_file, convert_html_to_pdf, convert_doc_to_pdf
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
""" Renders Index.html """
try:
return render_template('index.html')
except Exception as e:
print("Exception Occurred", e)
return jsonify({"status": "failed", "message": "Something Went Wrong !!"})
@app.route('/upload', methods=['POST'])
def file_converter():
"""
Function Processing Steps:
Step-1 : Check uploaded file extension ,if accepted format process further
Step-2 : Save the files into uploads folder
Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder
Note : If file is already in pdf format than file will directly save in converted_files
folder without other action.
"""
if request.method == "POST":
try:
files = request.files.getlist('file')
print("files", files)
if len(files) > 0:
for data in files:
if allowed_file(data.filename):
filename = secure_filename(data.filename)
extension = filename.split('.')
file_path = os.path.join('static/uploads', filename)
if extension[-1] == 'pdf':
pdf_file_path = os.path.join('static/converted_files', filename)
data.save(pdf_file_path)
else:
data.save(file_path)
if extension[-1] == 'html':
if convert_html_to_pdf(file_path, extension[0]):
print("File Converted to PDF Successfully !!")
else:
raise Exception('Something Went Wrong !')
elif extension[-1] == "docx" or extension[-1] == "doc":
if convert_doc_to_pdf(file_path):
print("File Converted to PDF Successfully !!")
else:
raise Exception('Something Went Wrong !')
return jsonify({"status": "success", "message": "File Uploaded Successfully !!"})
else:
return jsonify({"status": "failed", "message": "Format Not Allowed !!"})
else:
return jsonify({"status": "failed"})
except Exception as e:
print("Exception Occurred", e)
return jsonify({"status": "exception", "message": "Something Went Wrong !!"})
else:
return jsonify({"status": "failed", "message": "Method Not Allowed !"})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9000)
|
flexible
|
{
"blob_id": "860f77b031c815df40a16669dae8d32af4afa5bf",
"index": 868,
"step-1": "<mask token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n \"\"\" Renders Index.html \"\"\"\n try:\n return render_template('index.html')\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'failed', 'message':\n 'Something Went Wrong !!'})\n\n\[email protected]('/upload', methods=['POST'])\ndef file_converter():\n \"\"\"\n Function Processing Steps:\n Step-1 : Check uploaded file extension ,if accepted format process further\n Step-2 : Save the files into uploads folder\n Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder\n\n Note : If file is already in pdf format than file will directly save in converted_files\n folder without other action.\n \"\"\"\n if request.method == 'POST':\n try:\n files = request.files.getlist('file')\n print('files', files)\n if len(files) > 0:\n for data in files:\n if allowed_file(data.filename):\n filename = secure_filename(data.filename)\n extension = filename.split('.')\n file_path = os.path.join('static/uploads', filename)\n if extension[-1] == 'pdf':\n pdf_file_path = os.path.join(\n 'static/converted_files', filename)\n data.save(pdf_file_path)\n else:\n data.save(file_path)\n if extension[-1] == 'html':\n if convert_html_to_pdf(file_path, extension[0]):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n elif extension[-1] == 'docx' or extension[-1] == 'doc':\n if convert_doc_to_pdf(file_path):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n return jsonify({'status': 'success', 'message':\n 'File Uploaded Successfully !!'})\n else:\n return jsonify({'status': 'failed', 'message':\n 'Format Not Allowed !!'})\n else:\n return jsonify({'status': 'failed'})\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'exception', 'message':\n 'Something Went Wrong !!'})\n else:\n return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n \"\"\" Renders Index.html \"\"\"\n try:\n return render_template('index.html')\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'failed', 'message':\n 'Something Went Wrong !!'})\n\n\[email protected]('/upload', methods=['POST'])\ndef file_converter():\n \"\"\"\n Function Processing Steps:\n Step-1 : Check uploaded file extension ,if accepted format process further\n Step-2 : Save the files into uploads folder\n Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder\n\n Note : If file is already in pdf format than file will directly save in converted_files\n folder without other action.\n \"\"\"\n if request.method == 'POST':\n try:\n files = request.files.getlist('file')\n print('files', files)\n if len(files) > 0:\n for data in files:\n if allowed_file(data.filename):\n filename = secure_filename(data.filename)\n extension = filename.split('.')\n file_path = os.path.join('static/uploads', filename)\n if extension[-1] == 'pdf':\n pdf_file_path = os.path.join(\n 'static/converted_files', filename)\n data.save(pdf_file_path)\n else:\n data.save(file_path)\n if extension[-1] == 'html':\n if convert_html_to_pdf(file_path, extension[0]):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n elif extension[-1] == 'docx' or extension[-1] == 'doc':\n if convert_doc_to_pdf(file_path):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n return jsonify({'status': 'success', 'message':\n 'File Uploaded Successfully !!'})\n else:\n return jsonify({'status': 'failed', 'message':\n 'Format Not Allowed !!'})\n else:\n return jsonify({'status': 'failed'})\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'exception', 'message':\n 'Something Went Wrong !!'})\n else:\n return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9000)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n \"\"\" Renders Index.html \"\"\"\n try:\n return render_template('index.html')\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'failed', 'message':\n 'Something Went Wrong !!'})\n\n\[email protected]('/upload', methods=['POST'])\ndef file_converter():\n \"\"\"\n Function Processing Steps:\n Step-1 : Check uploaded file extension ,if accepted format process further\n Step-2 : Save the files into uploads folder\n Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder\n\n Note : If file is already in pdf format than file will directly save in converted_files\n folder without other action.\n \"\"\"\n if request.method == 'POST':\n try:\n files = request.files.getlist('file')\n print('files', files)\n if len(files) > 0:\n for data in files:\n if allowed_file(data.filename):\n filename = secure_filename(data.filename)\n extension = filename.split('.')\n file_path = os.path.join('static/uploads', filename)\n if extension[-1] == 'pdf':\n pdf_file_path = os.path.join(\n 'static/converted_files', filename)\n data.save(pdf_file_path)\n else:\n data.save(file_path)\n if extension[-1] == 'html':\n if convert_html_to_pdf(file_path, extension[0]):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n elif extension[-1] == 'docx' or extension[-1] == 'doc':\n if convert_doc_to_pdf(file_path):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n return jsonify({'status': 'success', 'message':\n 'File Uploaded Successfully !!'})\n else:\n return jsonify({'status': 'failed', 'message':\n 'Format Not Allowed !!'})\n else:\n return jsonify({'status': 'failed'})\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'exception', 'message':\n 'Something Went Wrong !!'})\n else:\n return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9000)\n",
"step-4": "from flask import Flask, jsonify, request, render_template\nfrom werkzeug import secure_filename\nimport os\nfrom utils import allowed_file, convert_html_to_pdf, convert_doc_to_pdf\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n \"\"\" Renders Index.html \"\"\"\n try:\n return render_template('index.html')\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'failed', 'message':\n 'Something Went Wrong !!'})\n\n\[email protected]('/upload', methods=['POST'])\ndef file_converter():\n \"\"\"\n Function Processing Steps:\n Step-1 : Check uploaded file extension ,if accepted format process further\n Step-2 : Save the files into uploads folder\n Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder\n\n Note : If file is already in pdf format than file will directly save in converted_files\n folder without other action.\n \"\"\"\n if request.method == 'POST':\n try:\n files = request.files.getlist('file')\n print('files', files)\n if len(files) > 0:\n for data in files:\n if allowed_file(data.filename):\n filename = secure_filename(data.filename)\n extension = filename.split('.')\n file_path = os.path.join('static/uploads', filename)\n if extension[-1] == 'pdf':\n pdf_file_path = os.path.join(\n 'static/converted_files', filename)\n data.save(pdf_file_path)\n else:\n data.save(file_path)\n if extension[-1] == 'html':\n if convert_html_to_pdf(file_path, extension[0]):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n elif extension[-1] == 'docx' or extension[-1] == 'doc':\n if convert_doc_to_pdf(file_path):\n print('File Converted to PDF Successfully !!')\n else:\n raise Exception('Something Went Wrong !')\n return jsonify({'status': 'success', 'message':\n 'File Uploaded Successfully !!'})\n else:\n return jsonify({'status': 'failed', 'message':\n 'Format Not Allowed !!'})\n else:\n return jsonify({'status': 'failed'})\n except Exception as e:\n print('Exception Occurred', e)\n return jsonify({'status': 'exception', 'message':\n 'Something Went Wrong !!'})\n else:\n return jsonify({'status': 'failed', 'message': 'Method Not Allowed !'})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9000)\n",
"step-5": "from flask import Flask, jsonify, request, render_template\nfrom werkzeug import secure_filename\nimport os\n\nfrom utils import allowed_file, convert_html_to_pdf, convert_doc_to_pdf\n\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n \"\"\" Renders Index.html \"\"\"\n try:\n return render_template('index.html')\n except Exception as e:\n print(\"Exception Occurred\", e)\n return jsonify({\"status\": \"failed\", \"message\": \"Something Went Wrong !!\"})\n\n\[email protected]('/upload', methods=['POST'])\ndef file_converter():\n \"\"\"\n Function Processing Steps:\n Step-1 : Check uploaded file extension ,if accepted format process further\n Step-2 : Save the files into uploads folder\n Step-3 : Convert the html,doc and docx files into pdf file and stores into converted_files folder\n\n Note : If file is already in pdf format than file will directly save in converted_files\n folder without other action.\n \"\"\"\n if request.method == \"POST\":\n try:\n files = request.files.getlist('file')\n print(\"files\", files)\n if len(files) > 0:\n for data in files:\n if allowed_file(data.filename):\n filename = secure_filename(data.filename)\n extension = filename.split('.')\n file_path = os.path.join('static/uploads', filename)\n\n if extension[-1] == 'pdf':\n pdf_file_path = os.path.join('static/converted_files', filename)\n data.save(pdf_file_path)\n else:\n data.save(file_path)\n\n if extension[-1] == 'html':\n if convert_html_to_pdf(file_path, extension[0]):\n print(\"File Converted to PDF Successfully !!\")\n else:\n raise Exception('Something Went Wrong !')\n\n elif extension[-1] == \"docx\" or extension[-1] == \"doc\":\n if convert_doc_to_pdf(file_path):\n print(\"File Converted to PDF Successfully !!\")\n else:\n raise Exception('Something Went Wrong !')\n return jsonify({\"status\": \"success\", \"message\": \"File Uploaded Successfully !!\"})\n\n else:\n return jsonify({\"status\": \"failed\", \"message\": \"Format Not Allowed !!\"})\n else:\n return jsonify({\"status\": \"failed\"})\n except Exception as e:\n print(\"Exception Occurred\", e)\n return jsonify({\"status\": \"exception\", \"message\": \"Something Went Wrong !!\"})\n else:\n return jsonify({\"status\": \"failed\", \"message\": \"Method Not Allowed !\"})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9000)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class LoginAPI(generics.GenericAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class ChangePasswordAPI(generics.UpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = ChangePasswordSerializer
def update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = request.user
user.set_password(serializer.validated_data['new_password'])
user.save()
return Response({'success': True}, status=status.HTTP_200_OK)
class UserAPI(generics.RetrieveUpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
def update(self, request, *args, **kwargs):
user = self.get_object()
first_name = request.data.get('first_name')
last_name = request.data.get('last_name')
mobile = request.data.get('mobile')
print(first_name, last_name, mobile)
user.first_name = first_name
user.last_name = last_name
user.mobile = mobile
user.save()
return Response({'success': False}, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginAPI(generics.GenericAPIView):
<|reserved_special_token_0|>
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({'user': UserSerializer(user, context=self.
get_serializer_context()).data, 'token': AuthToken.objects.
create(user)[1]})
class ChangePasswordAPI(generics.UpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = ChangePasswordSerializer
def update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = request.user
user.set_password(serializer.validated_data['new_password'])
user.save()
return Response({'success': True}, status=status.HTTP_200_OK)
class UserAPI(generics.RetrieveUpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
def update(self, request, *args, **kwargs):
user = self.get_object()
first_name = request.data.get('first_name')
last_name = request.data.get('last_name')
mobile = request.data.get('mobile')
print(first_name, last_name, mobile)
user.first_name = first_name
user.last_name = last_name
user.mobile = mobile
user.save()
return Response({'success': False}, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({'user': UserSerializer(user, context=self.
get_serializer_context()).data, 'token': AuthToken.objects.
create(user)[1]})
class ChangePasswordAPI(generics.UpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = ChangePasswordSerializer
def update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = request.user
user.set_password(serializer.validated_data['new_password'])
user.save()
return Response({'success': True}, status=status.HTTP_200_OK)
class UserAPI(generics.RetrieveUpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
def update(self, request, *args, **kwargs):
user = self.get_object()
first_name = request.data.get('first_name')
last_name = request.data.get('last_name')
mobile = request.data.get('mobile')
print(first_name, last_name, mobile)
user.first_name = first_name
user.last_name = last_name
user.mobile = mobile
user.save()
return Response({'success': False}, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
from knox.models import AuthToken
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer
class RegisterAPI(generics.CreateAPIView):
permission_classes = [permissions.AllowAny]
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({'user': UserSerializer(user, context=self.
get_serializer_context()).data, 'token': AuthToken.objects.
create(user)[1]})
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({'user': UserSerializer(user, context=self.
get_serializer_context()).data, 'token': AuthToken.objects.
create(user)[1]})
class ChangePasswordAPI(generics.UpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = ChangePasswordSerializer
def update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = request.user
user.set_password(serializer.validated_data['new_password'])
user.save()
return Response({'success': True}, status=status.HTTP_200_OK)
class UserAPI(generics.RetrieveUpdateAPIView):
permission_classes = [permissions.IsAuthenticated]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
def update(self, request, *args, **kwargs):
user = self.get_object()
first_name = request.data.get('first_name')
last_name = request.data.get('last_name')
mobile = request.data.get('mobile')
print(first_name, last_name, mobile)
user.first_name = first_name
user.last_name = last_name
user.mobile = mobile
user.save()
return Response({'success': False}, status=status.HTTP_200_OK)
<|reserved_special_token_1|>
from knox.models import AuthToken
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer
# Register API
class RegisterAPI(generics.CreateAPIView):
permission_classes = [
permissions.AllowAny
]
serializer_class = RegisterSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
# Login API
class LoginAPI(generics.GenericAPIView):
serializer_class = LoginSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data
return Response({
"user": UserSerializer(user, context=self.get_serializer_context()).data,
"token": AuthToken.objects.create(user)[1]
})
class ChangePasswordAPI(generics.UpdateAPIView):
permission_classes = [
permissions.IsAuthenticated
]
serializer_class = ChangePasswordSerializer
def update(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = request.user
user.set_password(serializer.validated_data['new_password'])
user.save()
return Response({
'success': True,
}, status=status.HTTP_200_OK)
# Get User API
class UserAPI(generics.RetrieveUpdateAPIView):
permission_classes = [
permissions.IsAuthenticated,
]
serializer_class = UserSerializer
def get_object(self):
return self.request.user
def update(self, request, *args, **kwargs):
user = self.get_object()
first_name = request.data.get('first_name')
last_name = request.data.get('last_name')
mobile = request.data.get('mobile')
print(first_name, last_name, mobile)
user.first_name = first_name
user.last_name = last_name
user.mobile = mobile
user.save()
return Response({
"success": False
}, status=status.HTTP_200_OK)
|
flexible
|
{
"blob_id": "5d6ec1b23dcbc935fe80dd09a2e967eb7e37a363",
"index": 5645,
"step-1": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n <mask token>\n <mask token>\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-2": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n <mask token>\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-3": "<mask token>\n\n\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-4": "from knox.models import AuthToken\nfrom rest_framework import generics, permissions, status\nfrom rest_framework.response import Response\nfrom accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer\n\n\nclass RegisterAPI(generics.CreateAPIView):\n permission_classes = [permissions.AllowAny]\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({'user': UserSerializer(user, context=self.\n get_serializer_context()).data, 'token': AuthToken.objects.\n create(user)[1]})\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({'success': True}, status=status.HTTP_200_OK)\n\n\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n print(first_name, last_name, mobile)\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n return Response({'success': False}, status=status.HTTP_200_OK)\n",
"step-5": "from knox.models import AuthToken\nfrom rest_framework import generics, permissions, status\nfrom rest_framework.response import Response\n\nfrom accounts.serializers import UserSerializer, RegisterSerializer, LoginSerializer, ChangePasswordSerializer\n\n\n# Register API\n\nclass RegisterAPI(generics.CreateAPIView):\n permission_classes = [\n permissions.AllowAny\n ]\n serializer_class = RegisterSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n return Response({\n \"user\": UserSerializer(user, context=self.get_serializer_context()).data,\n \"token\": AuthToken.objects.create(user)[1]\n })\n\n\n# Login API\nclass LoginAPI(generics.GenericAPIView):\n serializer_class = LoginSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data\n return Response({\n \"user\": UserSerializer(user, context=self.get_serializer_context()).data,\n \"token\": AuthToken.objects.create(user)[1]\n })\n\n\nclass ChangePasswordAPI(generics.UpdateAPIView):\n permission_classes = [\n permissions.IsAuthenticated\n ]\n serializer_class = ChangePasswordSerializer\n\n def update(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = request.user\n user.set_password(serializer.validated_data['new_password'])\n user.save()\n return Response({\n 'success': True,\n }, status=status.HTTP_200_OK)\n\n\n# Get User API\nclass UserAPI(generics.RetrieveUpdateAPIView):\n permission_classes = [\n permissions.IsAuthenticated,\n ]\n serializer_class = UserSerializer\n\n def get_object(self):\n return self.request.user\n\n def update(self, request, *args, **kwargs):\n user = self.get_object()\n first_name = request.data.get('first_name')\n last_name = request.data.get('last_name')\n mobile = request.data.get('mobile')\n\n print(first_name, last_name, mobile)\n\n user.first_name = first_name\n user.last_name = last_name\n user.mobile = mobile\n user.save()\n\n return Response({\n \"success\": False\n }, status=status.HTTP_200_OK)\n",
"step-ids": [
8,
9,
10,
14,
15
]
}
|
[
8,
9,
10,
14,
15
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:
reader = csv.reader(users_csv)
d = {}
for row in reader:
userId, profileName = row
if profileName == 'A Customer':
continue
value = d.get(profileName)
if not value:
d.setdefault(profileName, userId)
elif value != userId:
print(f'{userId}, {value}, {profileName}')
<|reserved_special_token_1|>
import csv
with open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:
reader = csv.reader(users_csv)
d = {}
for row in reader:
userId, profileName = row
if profileName == 'A Customer':
continue
value = d.get(profileName)
if not value:
d.setdefault(profileName, userId)
elif value != userId:
print(f'{userId}, {value}, {profileName}')
<|reserved_special_token_1|>
import csv
with open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:
reader = csv.reader(users_csv)
d = {}
for row in reader:
userId, profileName = row
if profileName == 'A Customer':
continue
value = d.get(profileName)
if not value:
d.setdefault(profileName, userId)
else:
if value != userId:
print(f'{userId}, {value}, {profileName}')
|
flexible
|
{
"blob_id": "3b77f7ea5137174e6723368502659390ea064c5a",
"index": 8968,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:\n reader = csv.reader(users_csv)\n d = {}\n for row in reader:\n userId, profileName = row\n if profileName == 'A Customer':\n continue\n value = d.get(profileName)\n if not value:\n d.setdefault(profileName, userId)\n elif value != userId:\n print(f'{userId}, {value}, {profileName}')\n",
"step-3": "import csv\nwith open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:\n reader = csv.reader(users_csv)\n d = {}\n for row in reader:\n userId, profileName = row\n if profileName == 'A Customer':\n continue\n value = d.get(profileName)\n if not value:\n d.setdefault(profileName, userId)\n elif value != userId:\n print(f'{userId}, {value}, {profileName}')\n",
"step-4": "import csv\n\nwith open('./csvs/users.csv', encoding='utf-8', newline='') as users_csv:\n reader = csv.reader(users_csv)\n d = {}\n for row in reader:\n userId, profileName = row\n if profileName == 'A Customer':\n continue\n value = d.get(profileName)\n if not value:\n d.setdefault(profileName, userId)\n else:\n if value != userId:\n print(f'{userId}, {value}, {profileName}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.shortcuts import render, get_object_or_404, redirect
#from emailupdate.forms import emailupdate_form
from forms import EmailForm
from django.utils import timezone
def index(request):
if request.method == "POST":
form = EmailForm(request.POST)
if form.is_valid():
post = form.save(commit=False)
post.signup_date = timezone.now()
post.email_confirmed = True
post.save()
return redirect('/emailupdate/thanks/')
else:
form_class = EmailForm
return render(request, 'emailupdate/emailupdate.html', {
'form': form_class,
})
def thanks(request):
return render(request, 'emailupdate/emailupdate_thanks.html')
|
normal
|
{
"blob_id": "f2cdee7e5eebaeeb784cb901c3ac6301e90ac7b9",
"index": 866,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef index(request):\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.signup_date = timezone.now()\n post.email_confirmed = True\n post.save()\n return redirect('/emailupdate/thanks/')\n else:\n form_class = EmailForm\n return render(request, 'emailupdate/emailupdate.html', {'form':\n form_class})\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef index(request):\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.signup_date = timezone.now()\n post.email_confirmed = True\n post.save()\n return redirect('/emailupdate/thanks/')\n else:\n form_class = EmailForm\n return render(request, 'emailupdate/emailupdate.html', {'form':\n form_class})\n\n\ndef thanks(request):\n return render(request, 'emailupdate/emailupdate_thanks.html')\n",
"step-4": "from django.shortcuts import render, get_object_or_404, redirect\nfrom forms import EmailForm\nfrom django.utils import timezone\n\n\ndef index(request):\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.signup_date = timezone.now()\n post.email_confirmed = True\n post.save()\n return redirect('/emailupdate/thanks/')\n else:\n form_class = EmailForm\n return render(request, 'emailupdate/emailupdate.html', {'form':\n form_class})\n\n\ndef thanks(request):\n return render(request, 'emailupdate/emailupdate_thanks.html')\n",
"step-5": "from django.shortcuts import render, get_object_or_404, redirect\n#from emailupdate.forms import emailupdate_form\nfrom forms import EmailForm\nfrom django.utils import timezone\n\ndef index(request):\n\tif request.method == \"POST\":\n\t\tform = EmailForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.signup_date = timezone.now()\n\t\t\tpost.email_confirmed = True\n\t\t\tpost.save()\n\t\t\treturn redirect('/emailupdate/thanks/')\n\telse:\n\t\tform_class = EmailForm\n\t\treturn render(request, 'emailupdate/emailupdate.html', {\n\t\t\t'form': form_class,\n\t\t})\t\n\ndef thanks(request):\n\treturn render(request, 'emailupdate/emailupdate_thanks.html')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
##Extras
def permissao():
editor = False
for row in session.auth.user_groups:
grupo = session.auth.user_groups[row]
if (grupo == "gerenciador") or (grupo == "administrador"):
editor = True
return editor
|
normal
|
{
"blob_id": "70de2bed00aabe3805c3a19da004713d4109568a",
"index": 9036,
"step-1": "<mask token>\n",
"step-2": "def permissao():\n editor = False\n for row in session.auth.user_groups:\n grupo = session.auth.user_groups[row]\n if grupo == 'gerenciador' or grupo == 'administrador':\n editor = True\n return editor\n",
"step-3": "##Extras\n\ndef permissao():\n\teditor = False\n\tfor row in session.auth.user_groups:\n\t\tgrupo = session.auth.user_groups[row]\n\t\tif (grupo == \"gerenciador\") or (grupo == \"administrador\"):\n\t\t\teditor = True\n\treturn editor",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import numpy as np
from scipy import stats
from scipy import interpolate
from math import factorial
from scipy import signal
"""
A continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,
it should work for most datasets.
Parameters
----------
lowerBound: The lowest value of the scale factor to use in the wavelet transform
upperBound: The highest value of the scale factor to use in the wavelet transform
steps: The number of scale factors we want between the highest and lowest bounds
rowWindow: The maximum number of rows that a ridge line can be discontinuous before it is
terminated. I.e. the maximum number of scale factors it can deviate.
colWindow: The maximum number of columns that a ridge line can wander before it is terminated.
I.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.
"""
# CWT Transform parameters
lowerBound = 1
upperBound = 70
steps = 90
# Ridge line filtering parameters
rowWindow = 2
columnWindow = 5
class _spectra:
def __init__(self,x,y):
self.x = x
self.y = y
def x(self):
return waveNumbers
def y(self):
return intensities
"""
Simple helper function for finding all of the maxima in the 2D array returned by the wavelet
transform. Works on the basis of a simple comparison between neighbouring elements. These
values form the initial basis for the ridge lines.
"""
def _findMaxima1D(CWTArray):
maximas = np.zeros(CWTArray.size,dtype=(float,3))
# Populate the maxima array with a tuple of the coordinates and the values of the maxima
count = 0
for j,row in enumerate(CWTArray):
for i,element in enumerate(row):
try:
if element > row[i-1] and element > row[i+1]:
maximas[count]= ((steps-j,i,element))
count += 1
except IndexError:
pass
return np.vstack(maximas[:count])
"""
Filter the ridge lines found from the maxima of the CWT coefficient array based on a set
parameters, namely the maximum deviations in wavenumber and scale space. Any lines which are
found from this criteria are considered to be peaks and further evaluated in the following
steps.
"""
def _filterRidgeLines(maximaArray,rowMax,colMax):
# Helper to prevent duplicating ridge lines
def checkValues(value, ridgeLines):
for lines in ridgeLines:
for points in lines:
if value in points:
return True
return False
ridgeLines = []
# Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)
for i,row in enumerate(maximaArray):
ridge = [] # For each maxima start a ridge line
colPos = row[1] # Get the column position of the current maxima
rowPos = row[0] # Get the row position of the current maxima
# If this value is already part of another ridge line, move to the next value
if checkValues(colPos, ridgeLines):
continue
for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima
if nextRows[0] == rowPos: # If the scale factors are the same, skip
continue
if np.abs(colPos - nextRows[1]) <= colMax and \
np.abs(rowPos - nextRows[0]) <= rowMax:
ridge.append((rowPos,colPos,nextRows[2]))
rowPos = nextRows[0]
colPos = nextRows[1]
# If the ridge lines run all the way to the lowest scale factors, add them to the list
if len(ridge) != 0:
if ridge[-1][0] <= 2:
ridgeLines.append(ridge)
return ridgeLines
"""
For each of the ridge lines found from the filtered CWT array, determine the other
characteristics of the peaks.
The position of the peak is determined from the position of the maxima in the ridge
line.
"""
def getPeakInfo(ridgeLines,data,waveletCoeff):
# For each of the ridge lines we have found, locate the positions of the maxima. These
# correspond to the peak centers.
peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\
('cwtCoeff','f'),('SNR','f'),('length','uint8'),\
('intensity','f'),('wavenumber','f')])
# For each of the ridge lines, add the position of the peak center and the length of the
# line. These are useful for filtering peaks later.
for i,lines in enumerate(ridgeLines):
# Find the index of the maximum CWT coefficient. This is the peak center.
maximum = np.argmax(zip(*lines)[2])
peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\
data.x[lines[maximum][1]],data.y[lines[maximum][1]]
# Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is
# defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.
for i, peaks in enumerate(peakInfo):
SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])
if len(SNR) == 0:
peakInfo['SNR'][i] = 0
else:
SNR = stats.scoreatpercentile(SNR, 95)
peakInfo['SNR'][i] = SNR
return peakInfo
"""
Processes spectral data and returns a structured array of peak information. Peak can then be
filtered based on ridge line length, signal to noise ratio and scale values.
"""
def getPeaks(waveNumbers,intensities):
data = _spectra(waveNumbers,intensities)
# Take the CWT of the spectra. Trim the result to remove padding.
waveletCoeff = signal.cwt(intensities, signal.ricker, \
np.linspace(lowerBound,upperBound,steps))
# Flip the matrix so the highest wavelet coefficient is the top row
waveletCoeff = np.flipud(waveletCoeff)
# Find the ridge lines connecting the maxima in the wavelet coefficient array. Filter ridge lines
# takes a (scaleFactor,3) array of positions and values of maxima.
ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)
# Populate a structured array with peak information
peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)
return peakInfo
|
normal
|
{
"blob_id": "8f5d9918260e2f50fb229a7067f820a186101b99",
"index": 1080,
"step-1": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n <mask token>\n\n def y(self):\n return intensities\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"step-3": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"step-4": "<mask token>\n\n\nclass _spectra:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\n<mask token>\n\n\ndef _findMaxima1D(CWTArray):\n maximas = np.zeros(CWTArray.size, dtype=(float, 3))\n count = 0\n for j, row in enumerate(CWTArray):\n for i, element in enumerate(row):\n try:\n if element > row[i - 1] and element > row[i + 1]:\n maximas[count] = steps - j, i, element\n count += 1\n except IndexError:\n pass\n return np.vstack(maximas[:count])\n\n\n<mask token>\n\n\ndef _filterRidgeLines(maximaArray, rowMax, colMax):\n\n def checkValues(value, ridgeLines):\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n ridgeLines = []\n for i, row in enumerate(maximaArray):\n ridge = []\n colPos = row[1]\n rowPos = row[0]\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:, :]):\n if nextRows[0] == rowPos:\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and np.abs(rowPos -\n nextRows[0]) <= rowMax:\n ridge.append((rowPos, colPos, nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n return ridgeLines\n\n\n<mask token>\n\n\ndef getPeakInfo(ridgeLines, data, waveletCoeff):\n peakInfo = np.zeros(len(ridgeLines), dtype=[('position', 'int32'), (\n 'scale', 'int32'), ('cwtCoeff', 'f'), ('SNR', 'f'), ('length',\n 'uint8'), ('intensity', 'f'), ('wavenumber', 'f')])\n for i, lines in enumerate(ridgeLines):\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1], lines[maximum][0], lines[maximum][2\n ], 0, len(lines), data.x[lines[maximum][1]], data.y[lines[\n maximum][1]]\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1, peaks[0] - 15:peaks[0] + 15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n return peakInfo\n\n\n<mask token>\n\n\ndef getPeaks(waveNumbers, intensities):\n data = _spectra(waveNumbers, intensities)\n waveletCoeff = signal.cwt(intensities, signal.ricker, np.linspace(\n lowerBound, upperBound, steps))\n waveletCoeff = np.flipud(waveletCoeff)\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),\n columnWindow, rowWindow)\n peakInfo = getPeakInfo(ridgeLines, data, waveletCoeff)\n return peakInfo\n",
"step-5": "import numpy as np\nfrom scipy import stats\nfrom scipy import interpolate\nfrom math import factorial\nfrom scipy import signal\n\n\"\"\"\n\nA continuous wavelet transform based peak finder. Tested exclusively on Raman spectra, however,\nit should work for most datasets.\n\nParameters\n----------\n\nlowerBound: The lowest value of the scale factor to use in the wavelet transform\nupperBound: The highest value of the scale factor to use in the wavelet transform\nsteps: The number of scale factors we want between the highest and lowest bounds\n\nrowWindow: The maximum number of rows that a ridge line can be discontinuous before it is\nterminated. I.e. the maximum number of scale factors it can deviate.\n\ncolWindow: The maximum number of columns that a ridge line can wander before it is terminated.\nI.e. the maximum number of wavenumbers (or a similar X value) that the ridge line can deviate.\n\n\"\"\"\n\n# CWT Transform parameters\nlowerBound = 1\nupperBound = 70\nsteps = 90\n\n# Ridge line filtering parameters\nrowWindow = 2\ncolumnWindow = 5\n\nclass _spectra:\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n def x(self):\n return waveNumbers\n\n def y(self):\n return intensities\n\n\"\"\"\n\nSimple helper function for finding all of the maxima in the 2D array returned by the wavelet\ntransform. Works on the basis of a simple comparison between neighbouring elements. These\nvalues form the initial basis for the ridge lines.\n\n\"\"\"\ndef _findMaxima1D(CWTArray):\n\n maximas = np.zeros(CWTArray.size,dtype=(float,3))\n\n # Populate the maxima array with a tuple of the coordinates and the values of the maxima\n count = 0\n for j,row in enumerate(CWTArray):\n for i,element in enumerate(row):\n try:\n if element > row[i-1] and element > row[i+1]:\n maximas[count]= ((steps-j,i,element))\n count += 1\n except IndexError:\n pass\n\n return np.vstack(maximas[:count])\n\n\"\"\"\n\nFilter the ridge lines found from the maxima of the CWT coefficient array based on a set\nparameters, namely the maximum deviations in wavenumber and scale space. 
Any lines which are\nfound from this criteria are considered to be peaks and further evaluated in the following\nsteps.\n\n\"\"\"\ndef _filterRidgeLines(maximaArray,rowMax,colMax):\n\n # Helper to prevent duplicating ridge lines\n def checkValues(value, ridgeLines):\n\n for lines in ridgeLines:\n for points in lines:\n if value in points:\n return True\n return False\n\n ridgeLines = []\n\n # Maxima array is a n row, 1 column array containing tuples of (scaleFactor, column)\n for i,row in enumerate(maximaArray):\n ridge = [] # For each maxima start a ridge line\n colPos = row[1] # Get the column position of the current maxima\n rowPos = row[0] # Get the row position of the current maxima\n # If this value is already part of another ridge line, move to the next value\n if checkValues(colPos, ridgeLines):\n continue\n for j, nextRows in enumerate(maximaArray[i:,:]): # Look through the subsequent maxima\n if nextRows[0] == rowPos: # If the scale factors are the same, skip\n continue\n if np.abs(colPos - nextRows[1]) <= colMax and \\\n np.abs(rowPos - nextRows[0]) <= rowMax:\n ridge.append((rowPos,colPos,nextRows[2]))\n rowPos = nextRows[0]\n colPos = nextRows[1]\n\n # If the ridge lines run all the way to the lowest scale factors, add them to the list\n if len(ridge) != 0:\n if ridge[-1][0] <= 2:\n ridgeLines.append(ridge)\n\n return ridgeLines\n\n\"\"\"\n\nFor each of the ridge lines found from the filtered CWT array, determine the other\ncharacteristics of the peaks.\n\nThe position of the peak is determined from the position of the maxima in the ridge\nline.\n\n\"\"\"\ndef getPeakInfo(ridgeLines,data,waveletCoeff):\n\n # For each of the ridge lines we have found, locate the positions of the maxima. These\n # correspond to the peak centers.\n peakInfo = np.zeros(len(ridgeLines),dtype=[('position','int32'),('scale','int32'),\\\n ('cwtCoeff','f'),('SNR','f'),('length','uint8'),\\\n ('intensity','f'),('wavenumber','f')])\n\n # For each of the ridge lines, add the position of the peak center and the length of the\n # line. These are useful for filtering peaks later.\n for i,lines in enumerate(ridgeLines):\n # Find the index of the maximum CWT coefficient. This is the peak center.\n maximum = np.argmax(zip(*lines)[2])\n peakInfo[i] = lines[maximum][1],lines[maximum][0],lines[maximum][2],0,len(lines),\\\n data.x[lines[maximum][1]],data.y[lines[maximum][1]]\n\n # Calculate the local SNR of each peak within a window of 30 pixels of the peak. The SNR is\n # defined as the 95th quantile of the absolute values of the lowest scale factor coefficients.\n for i, peaks in enumerate(peakInfo):\n SNR = np.abs(waveletCoeff[-1,peaks[0]-15:peaks[0]+15])\n if len(SNR) == 0:\n peakInfo['SNR'][i] = 0\n else:\n SNR = stats.scoreatpercentile(SNR, 95)\n peakInfo['SNR'][i] = SNR\n\n return peakInfo\n\n\"\"\"\n\nProcesses spectral data and returns a structured array of peak information. Peak can then be\nfiltered based on ridge line length, signal to noise ratio and scale values.\n\n\"\"\"\ndef getPeaks(waveNumbers,intensities):\n\n data = _spectra(waveNumbers,intensities)\n\n # Take the CWT of the spectra. Trim the result to remove padding.\n waveletCoeff = signal.cwt(intensities, signal.ricker, \\\n np.linspace(lowerBound,upperBound,steps))\n\n # Flip the matrix so the highest wavelet coefficient is the top row\n waveletCoeff = np.flipud(waveletCoeff)\n\n # Find the ridge lines connecting the maxima in the wavelet coefficient array. 
Filter ridge lines\n # takes a (scaleFactor,3) array of positions and values of maxima.\n ridgeLines = _filterRidgeLines(_findMaxima1D(waveletCoeff),columnWindow,rowWindow)\n\n # Populate a structured array with peak information\n peakInfo = getPeakInfo(ridgeLines,data,waveletCoeff)\n\n return peakInfo\n",
"step-ids": [
3,
5,
6,
8,
11
]
}
|
[
3,
5,
6,
8,
11
] |
<|reserved_special_token_0|>
@app.route('/')
def signin():
return render_template('index.html')
<|reserved_special_token_0|>
@app.route('/login', methods=['POST'])
def login():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM users WHERE email = %(em)s;'
data = {'em': request.form['email']}
result = mysql.query_db(query, data)
if len(result) > 0:
if bcrypt.check_password_hash(result[0]['password'], request.form[
'password']):
session['id_users'] = result[0]['id_users']
session['greeting'] = result[0]['first_name']
return redirect('/quotes')
else:
flash('Email and/or password does not match.')
return redirect('/')
else:
flash('Please enter your registered Email.')
return redirect('/')
@app.route('/success')
def success():
if 'id_users' not in session:
return redirect('/')
else:
return render_template('success.html')
<|reserved_special_token_0|>
@app.route('/create', methods=['POST'])
def create():
is_valid = True
if len(request.form['content']) < 10:
flash('quotes are required to be longer than 10 characters.')
is_valid == False
if is_valid == True:
mysql = connectToMySQL(DATABASE)
query = (
'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'
)
data = {'quo': request.form['content'], 'auth': request.form[
'author'], 'from': session['id_users']}
mysql.query_db(query, data)
return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id, thing):
if session['id_users'] == int(thing):
mysql = connectToMySQL(DATABASE)
query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'
data = {'id': id}
mysql.query_db(query, data)
return redirect('/quotes')
else:
flash("Unable to delete other's quotes")
return redirect('/quotes')
@app.route('/edit')
def edit():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * From users WHERE id_users = %(id)s'
data = {'id': session['id_users']}
users_table = mysql.query_db(query, data)
return render_template('edit_account.html', users=users_table)
@app.route('/update', methods=['POST'])
def update():
is_valid = True
if len(request.form['f_name']) < 3:
is_valid = False
flash('please enter your first name.')
if len(request.form['l_name']) < 3:
is_valid = False
flash('please enter your last name.')
if not EMAIL_REGEX.match(request.form['email']):
flash('Invalid email address!')
if not is_valid:
return redirect('/edit')
else:
flash('sucessfully updated')
mysql = connectToMySQL(DATABASE)
query = (
'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'
)
data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],
'em': request.form['email'], 'id': session['id_users']}
id = mysql.query_db(query, data)
session['greeting'] = request.form['f_name']
return redirect('/quotes')
@app.route('/my_posts')
def my_post():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'
data = {'id': session['id_users']}
my_quotes = mysql.query_db(query, data)
return render_template('my_posts.html', quotes=my_quotes)
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def signin():
return render_template('index.html')
<|reserved_special_token_0|>
@app.route('/login', methods=['POST'])
def login():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM users WHERE email = %(em)s;'
data = {'em': request.form['email']}
result = mysql.query_db(query, data)
if len(result) > 0:
if bcrypt.check_password_hash(result[0]['password'], request.form[
'password']):
session['id_users'] = result[0]['id_users']
session['greeting'] = result[0]['first_name']
return redirect('/quotes')
else:
flash('Email and/or password does not match.')
return redirect('/')
else:
flash('Please enter your registered Email.')
return redirect('/')
@app.route('/success')
def success():
if 'id_users' not in session:
return redirect('/')
else:
return render_template('success.html')
@app.route('/quotes')
def quotes():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'
join = mysql.query_db(query)
return render_template('quotes.html', joined=join)
@app.route('/create', methods=['POST'])
def create():
is_valid = True
if len(request.form['content']) < 10:
flash('quotes are required to be longer than 10 characters.')
is_valid == False
if is_valid == True:
mysql = connectToMySQL(DATABASE)
query = (
'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'
)
data = {'quo': request.form['content'], 'auth': request.form[
'author'], 'from': session['id_users']}
mysql.query_db(query, data)
return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id, thing):
if session['id_users'] == int(thing):
mysql = connectToMySQL(DATABASE)
query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'
data = {'id': id}
mysql.query_db(query, data)
return redirect('/quotes')
else:
flash("Unable to delete other's quotes")
return redirect('/quotes')
@app.route('/edit')
def edit():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * From users WHERE id_users = %(id)s'
data = {'id': session['id_users']}
users_table = mysql.query_db(query, data)
return render_template('edit_account.html', users=users_table)
@app.route('/update', methods=['POST'])
def update():
is_valid = True
if len(request.form['f_name']) < 3:
is_valid = False
flash('please enter your first name.')
if len(request.form['l_name']) < 3:
is_valid = False
flash('please enter your last name.')
if not EMAIL_REGEX.match(request.form['email']):
flash('Invalid email address!')
if not is_valid:
return redirect('/edit')
else:
flash('sucessfully updated')
mysql = connectToMySQL(DATABASE)
query = (
'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'
)
data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],
'em': request.form['email'], 'id': session['id_users']}
id = mysql.query_db(query, data)
session['greeting'] = request.form['f_name']
return redirect('/quotes')
@app.route('/my_posts')
def my_post():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'
data = {'id': session['id_users']}
my_quotes = mysql.query_db(query, data)
return render_template('my_posts.html', quotes=my_quotes)
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def signin():
return render_template('index.html')
@app.route('/register', methods=['POST'])
def register():
is_valid = True
if len(request.form['first_name']) < 2:
is_valid = False
flash('please enter your first name.')
if len(request.form['last_name']) < 2:
is_valid = False
flash('please enter your last name.')
if not EMAIL_REGEX.match(request.form['email']):
flash('Invalid email address!')
if len(request.form['password']) < 8:
is_valid = False
flash('password must be atleast 8 characters long.')
if request.form['password'] != request.form['confirm_password']:
is_valid = False
flash('passwords do not match.')
if not is_valid:
return redirect('/')
else:
flash('sucessfully added')
mysql = connectToMySQL(DATABASE)
pw_hash = bcrypt.generate_password_hash(request.form['password'])
query = (
'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'
)
data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[
'first_name'], 'ln': request.form['last_name']}
id_users = mysql.query_db(query, data)
session['id_users'] = id_users
session['greeting'] = request.form['first_name']
return redirect('/quotes')
@app.route('/login', methods=['POST'])
def login():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM users WHERE email = %(em)s;'
data = {'em': request.form['email']}
result = mysql.query_db(query, data)
if len(result) > 0:
if bcrypt.check_password_hash(result[0]['password'], request.form[
'password']):
session['id_users'] = result[0]['id_users']
session['greeting'] = result[0]['first_name']
return redirect('/quotes')
else:
flash('Email and/or password does not match.')
return redirect('/')
else:
flash('Please enter your registered Email.')
return redirect('/')
@app.route('/success')
def success():
if 'id_users' not in session:
return redirect('/')
else:
return render_template('success.html')
@app.route('/quotes')
def quotes():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'
join = mysql.query_db(query)
return render_template('quotes.html', joined=join)
@app.route('/create', methods=['POST'])
def create():
is_valid = True
if len(request.form['content']) < 10:
flash('quotes are required to be longer than 10 characters.')
is_valid == False
if is_valid == True:
mysql = connectToMySQL(DATABASE)
query = (
'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'
)
data = {'quo': request.form['content'], 'auth': request.form[
'author'], 'from': session['id_users']}
mysql.query_db(query, data)
return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id, thing):
if session['id_users'] == int(thing):
mysql = connectToMySQL(DATABASE)
query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'
data = {'id': id}
mysql.query_db(query, data)
return redirect('/quotes')
else:
flash("Unable to delete other's quotes")
return redirect('/quotes')
@app.route('/edit')
def edit():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * From users WHERE id_users = %(id)s'
data = {'id': session['id_users']}
users_table = mysql.query_db(query, data)
return render_template('edit_account.html', users=users_table)
@app.route('/update', methods=['POST'])
def update():
is_valid = True
if len(request.form['f_name']) < 3:
is_valid = False
flash('please enter your first name.')
if len(request.form['l_name']) < 3:
is_valid = False
flash('please enter your last name.')
if not EMAIL_REGEX.match(request.form['email']):
flash('Invalid email address!')
if not is_valid:
return redirect('/edit')
else:
flash('sucessfully updated')
mysql = connectToMySQL(DATABASE)
query = (
'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'
)
data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],
'em': request.form['email'], 'id': session['id_users']}
id = mysql.query_db(query, data)
session['greeting'] = request.form['f_name']
return redirect('/quotes')
@app.route('/my_posts')
def my_post():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'
data = {'id': session['id_users']}
my_quotes = mysql.query_db(query, data)
return render_template('my_posts.html', quotes=my_quotes)
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
bcrypt = Bcrypt(app)
app.secret_key = 'something secret10'
DATABASE = 'exam_quote_dash'
EMAIL_REGEX = re.compile('^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$')
@app.route('/')
def signin():
return render_template('index.html')
@app.route('/register', methods=['POST'])
def register():
is_valid = True
if len(request.form['first_name']) < 2:
is_valid = False
flash('please enter your first name.')
if len(request.form['last_name']) < 2:
is_valid = False
flash('please enter your last name.')
if not EMAIL_REGEX.match(request.form['email']):
flash('Invalid email address!')
if len(request.form['password']) < 8:
is_valid = False
flash('password must be atleast 8 characters long.')
if request.form['password'] != request.form['confirm_password']:
is_valid = False
flash('passwords do not match.')
if not is_valid:
return redirect('/')
else:
flash('sucessfully added')
mysql = connectToMySQL(DATABASE)
pw_hash = bcrypt.generate_password_hash(request.form['password'])
query = (
'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'
)
data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[
'first_name'], 'ln': request.form['last_name']}
id_users = mysql.query_db(query, data)
session['id_users'] = id_users
session['greeting'] = request.form['first_name']
return redirect('/quotes')
@app.route('/login', methods=['POST'])
def login():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM users WHERE email = %(em)s;'
data = {'em': request.form['email']}
result = mysql.query_db(query, data)
if len(result) > 0:
if bcrypt.check_password_hash(result[0]['password'], request.form[
'password']):
session['id_users'] = result[0]['id_users']
session['greeting'] = result[0]['first_name']
return redirect('/quotes')
else:
flash('Email and/or password does not match.')
return redirect('/')
else:
flash('Please enter your registered Email.')
return redirect('/')
@app.route('/success')
def success():
if 'id_users' not in session:
return redirect('/')
else:
return render_template('success.html')
@app.route('/quotes')
def quotes():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'
join = mysql.query_db(query)
return render_template('quotes.html', joined=join)
@app.route('/create', methods=['POST'])
def create():
is_valid = True
if len(request.form['content']) < 10:
flash('quotes are required to be longer than 10 characters.')
is_valid == False
if is_valid == True:
mysql = connectToMySQL(DATABASE)
query = (
'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'
)
data = {'quo': request.form['content'], 'auth': request.form[
'author'], 'from': session['id_users']}
mysql.query_db(query, data)
return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id, thing):
if session['id_users'] == int(thing):
mysql = connectToMySQL(DATABASE)
query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'
data = {'id': id}
mysql.query_db(query, data)
return redirect('/quotes')
else:
flash("Unable to delete other's quotes")
return redirect('/quotes')
@app.route('/edit')
def edit():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * From users WHERE id_users = %(id)s'
data = {'id': session['id_users']}
users_table = mysql.query_db(query, data)
return render_template('edit_account.html', users=users_table)
@app.route('/update', methods=['POST'])
def update():
is_valid = True
if len(request.form['f_name']) < 3:
is_valid = False
flash('please enter your first name.')
if len(request.form['l_name']) < 3:
is_valid = False
flash('please enter your last name.')
if not EMAIL_REGEX.match(request.form['email']):
flash('Invalid email address!')
if not is_valid:
return redirect('/edit')
else:
flash('sucessfully updated')
mysql = connectToMySQL(DATABASE)
query = (
'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'
)
data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],
'em': request.form['email'], 'id': session['id_users']}
id = mysql.query_db(query, data)
session['greeting'] = request.form['f_name']
return redirect('/quotes')
@app.route('/my_posts')
def my_post():
mysql = connectToMySQL(DATABASE)
query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'
data = {'id': session['id_users']}
my_quotes = mysql.query_db(query, data)
return render_template('my_posts.html', quotes=my_quotes)
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, render_template, request, redirect, flash, session
from mysqlconnection import connectToMySQL
from flask_bcrypt import Bcrypt
import re
app = Flask(__name__)
bcrypt = Bcrypt(app)
app.secret_key = "something secret10"
DATABASE = "exam_quote_dash"
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
#users
# id_users, first_name, last_name, email, password
#quotes
#id_quotes, from_user, liked_from, content, author
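# A rough schema sketch consistent with the queries below; the column names come
# from the comments above and the SQL in this file, while the types are
# illustrative assumptions:
#   CREATE TABLE users  (id_users INT AUTO_INCREMENT PRIMARY KEY, first_name VARCHAR(45),
#                        last_name VARCHAR(45), email VARCHAR(255), password VARCHAR(255));
#   CREATE TABLE quotes (id_quotes INT AUTO_INCREMENT PRIMARY KEY, from_user INT,
#                        liked_from INT, content TEXT, author VARCHAR(255));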
@app.route("/")
def signin():
return render_template("index.html")
@app.route("/register", methods=["POST"])
def register():
is_valid = True
    if len(request.form['first_name']) < 2:
        is_valid = False
        flash("please enter your first name.")
    if len(request.form['last_name']) < 2:
        is_valid = False
        flash("please enter your last name.")
    if not EMAIL_REGEX.match(request.form['email']):
        is_valid = False
        flash("Invalid email address!")
    if len(request.form['password']) < 8:
        is_valid = False
        flash("password must be at least 8 characters long.")
    if request.form['password'] != request.form['confirm_password']:
        is_valid = False
        flash("passwords do not match.")
if not is_valid:
return redirect('/')
else:
flash("sucessfully added")
mysql = connectToMySQL(DATABASE)
pw_hash = bcrypt.generate_password_hash(request.form['password'])
query = "INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);"
data = {
'em': request.form['email'],
'pw': pw_hash,
'fn': request.form['first_name'],
'ln': request.form['last_name']
}
id_users = mysql.query_db(query,data)
session['id_users'] = id_users
session['greeting'] = request.form['first_name']
return redirect('/quotes')
@app.route('/login', methods=['POST'])
def login():
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM users WHERE email = %(em)s;"
data = {
'em': request.form['email']
}
result = mysql.query_db(query, data)
if len(result) > 0:
if bcrypt.check_password_hash(result[0]['password'], request.form['password']):
session['id_users'] = result[0]['id_users']
session['greeting'] = result[0]['first_name']
return redirect('/quotes')
else:
flash("Email and/or password does not match.")
return redirect('/')
else:
flash("Please enter your registered Email.")
return redirect('/')
@app.route('/success')
def success():
if 'id_users' not in session:
return redirect('/')
else:
return render_template('success.html')
@app.route('/quotes')
def quotes():
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM quotes JOIN users ON from_user = id_users;"
join = mysql.query_db(query)
return render_template('quotes.html', joined = join)
@app.route('/create', methods=['POST'])
def create():
is_valid = True
if len(request.form['content']) < 10:
flash("quotes are required to be longer than 10 characters.")
        is_valid = False
if is_valid == True:
mysql = connectToMySQL(DATABASE)
query = "INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);"
data = {
'quo': request.form['content'],
'auth': request.form['author'],
'from': session['id_users']
}
mysql.query_db(query, data)
return redirect('/quotes')
@app.route('/delete/<id>/<thing>')
def delete(id,thing):
if session['id_users'] == int(thing):
mysql = connectToMySQL(DATABASE)
query = "DELETE FROM quotes WHERE id_quotes = %(id)s;"
data = {
'id': id
}
mysql.query_db(query, data)
return redirect('/quotes')
else:
flash("Unable to delete other's quotes")
return redirect('/quotes')
@app.route("/edit")
def edit():
mysql = connectToMySQL(DATABASE)
query = "SELECT * From users WHERE id_users = %(id)s"
data ={
'id' : session['id_users']
}
users_table = mysql.query_db(query, data)
return render_template('edit_account.html', users = users_table)
@app.route("/update", methods=["POST"])
def update():
is_valid = True
    if len(request.form['f_name']) < 3:
        is_valid = False
        flash("please enter your first name.")
    if len(request.form['l_name']) < 3:
        is_valid = False
        flash("please enter your last name.")
    if not EMAIL_REGEX.match(request.form['email']):
        is_valid = False
        flash("Invalid email address!")
if not is_valid:
return redirect('/edit')
else:
flash("sucessfully updated")
mysql = connectToMySQL(DATABASE)
query = "UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;"
data = {
"fn": request.form["f_name"],
"ln": request.form["l_name"],
"em": request.form["email"],
'id' : session['id_users']
}
id = mysql.query_db(query, data)
session['greeting'] = request.form['f_name']
return redirect('/quotes')
@app.route("/my_posts")
def my_post():
mysql = connectToMySQL(DATABASE)
query = "SELECT * FROM quotes WHERE from_user = %(id)s;"
data ={
'id' : session['id_users']
}
my_quotes = mysql.query_db(query, data)
return render_template('my_posts.html', quotes = my_quotes)
@app.route('/logout')
def logout():
session.clear()
return redirect('/')
if __name__=="__main__":
app.run(debug=True)
|
flexible
|
{
"blob_id": "e732fa0e2b377a87b8b088303b277cc08cb695b3",
"index": 5279,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\n<mask token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\n<mask token>\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\n<mask token>\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\[email protected]('/register', methods=['POST'])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['last_name']) < 2:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if len(request.form['password']) < 8:\n is_valid = False\n flash('password must be atleast 8 characters long.')\n if request.form['password'] != request.form['confirm_password']:\n is_valid = False\n flash('passwords do not match.')\n if not is_valid:\n return redirect('/')\n else:\n flash('sucessfully added')\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = (\n 'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'\n )\n data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[\n 'first_name'], 'ln': request.form['last_name']}\n id_users = mysql.query_db(query, data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name']\n return redirect('/quotes')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n 
is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "<mask token>\napp = Flask(__name__)\nbcrypt = Bcrypt(app)\napp.secret_key = 'something secret10'\nDATABASE = 'exam_quote_dash'\nEMAIL_REGEX = re.compile('^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\\\.[a-zA-Z]+$')\n\n\[email protected]('/')\ndef signin():\n return render_template('index.html')\n\n\[email protected]('/register', methods=['POST'])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['last_name']) < 2:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if len(request.form['password']) < 8:\n is_valid = False\n flash('password must be atleast 8 characters long.')\n if request.form['password'] != request.form['confirm_password']:\n is_valid = False\n flash('passwords do not match.')\n if not is_valid:\n return redirect('/')\n else:\n flash('sucessfully added')\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = (\n 'INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);'\n )\n data = {'em': request.form['email'], 'pw': pw_hash, 'fn': request.form[\n 'first_name'], 'ln': request.form['last_name']}\n id_users = mysql.query_db(query, data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name']\n return redirect('/quotes')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM users WHERE email = %(em)s;'\n data = {'em': request.form['email']}\n result = mysql.query_db(query, data)\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form[\n 'password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash('Email and/or password does not match.')\n return redirect('/')\n else:\n flash('Please enter your registered Email.')\n return redirect('/')\n\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes JOIN users ON from_user = id_users;'\n join = mysql.query_db(query)\n return render_template('quotes.html', joined=join)\n\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n if len(request.form['content']) < 10:\n flash('quotes are required to be longer than 10 characters.')\n is_valid == False\n if is_valid == True:\n mysql = connectToMySQL(DATABASE)\n query = (\n 'INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);'\n )\n data = {'quo': request.form['content'], 'auth': request.form[\n 'author'], 'from': session['id_users']}\n mysql.query_db(query, data)\n return redirect('/quotes')\n\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id, thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = 'DELETE FROM quotes WHERE id_quotes = %(id)s;'\n data = {'id': id}\n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\n\[email protected]('/edit')\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * From users WHERE id_users = %(id)s'\n data = {'id': 
session['id_users']}\n users_table = mysql.query_db(query, data)\n return render_template('edit_account.html', users=users_table)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n is_valid = False\n flash('please enter your first name.')\n if len(request.form['l_name']) < 3:\n is_valid = False\n flash('please enter your last name.')\n if not EMAIL_REGEX.match(request.form['email']):\n flash('Invalid email address!')\n if not is_valid:\n return redirect('/edit')\n else:\n flash('sucessfully updated')\n mysql = connectToMySQL(DATABASE)\n query = (\n 'UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;'\n )\n data = {'fn': request.form['f_name'], 'ln': request.form['l_name'],\n 'em': request.form['email'], 'id': session['id_users']}\n id = mysql.query_db(query, data)\n session['greeting'] = request.form['f_name']\n return redirect('/quotes')\n\n\[email protected]('/my_posts')\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = 'SELECT * FROM quotes WHERE from_user = %(id)s;'\n data = {'id': session['id_users']}\n my_quotes = mysql.query_db(query, data)\n return render_template('my_posts.html', quotes=my_quotes)\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, render_template, request, redirect, flash, session\nfrom mysqlconnection import connectToMySQL\nfrom flask_bcrypt import Bcrypt\nimport re\n\napp = Flask(__name__)\nbcrypt = Bcrypt(app)\napp.secret_key = \"something secret10\"\nDATABASE = \"exam_quote_dash\"\nEMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\\.[a-zA-Z]+$') \n\n#users\n# id_users, first_name, last_name, email, password\n\n#quotes\n#id_quotes, from_user, liked_from, content, author\n\[email protected](\"/\")\ndef signin():\n return render_template(\"index.html\")\n\[email protected](\"/register\", methods=[\"POST\"])\ndef register():\n is_valid = True\n if len(request.form['first_name']) < 2:\n \tis_valid = False\n \tflash(\"please enter your first name.\")\n if len(request.form['last_name']) < 2:\n \tis_valid = False\n \tflash(\"please enter your last name.\")\n if not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid email address!\")\n if len(request.form['password']) < 8:\n \tis_valid = False\n \tflash(\"password must be atleast 8 characters long.\")\n if (request.form['password'] != request.form['confirm_password']):\n \tis_valid = False\n \tflash(\"passwords do not match.\")\n if not is_valid:\n return redirect('/')\n else:\n flash(\"sucessfully added\")\n mysql = connectToMySQL(DATABASE)\n pw_hash = bcrypt.generate_password_hash(request.form['password'])\n query = \"INSERT INTO users (email, password, first_name, last_name) VALUES (%(em)s,%(pw)s,%(fn)s,%(ln)s);\"\n data = {\n 'em': request.form['email'],\n 'pw': pw_hash,\n 'fn': request.form['first_name'],\n 'ln': request.form['last_name']\n }\n id_users = mysql.query_db(query,data)\n session['id_users'] = id_users\n session['greeting'] = request.form['first_name'] \n\n return redirect('/quotes')\n\[email protected]('/login', methods=['POST'])\ndef login():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM users WHERE email = %(em)s;\"\n data = {\n 'em': request.form['email']\n }\n result = mysql.query_db(query, data)\n\n if len(result) > 0:\n if bcrypt.check_password_hash(result[0]['password'], request.form['password']):\n session['id_users'] = result[0]['id_users']\n session['greeting'] = result[0]['first_name']\n return redirect('/quotes')\n else:\n flash(\"Email and/or password does not match.\")\n return redirect('/')\n else:\n flash(\"Please enter your registered Email.\")\n return redirect('/')\n\[email protected]('/success')\ndef success():\n if 'id_users' not in session:\n return redirect('/')\n else:\n return render_template('success.html')\n\[email protected]('/quotes')\ndef quotes():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM quotes JOIN users ON from_user = id_users;\"\n join = mysql.query_db(query)\n\n return render_template('quotes.html', joined = join)\n\[email protected]('/create', methods=['POST'])\ndef create():\n is_valid = True\n\n if len(request.form['content']) < 10:\n flash(\"quotes are required to be longer than 10 characters.\")\n is_valid == False\n\n if is_valid == True: \n mysql = connectToMySQL(DATABASE)\n query = \"INSERT INTO quotes (content, author, from_user) VALUES (%(quo)s, %(auth)s, %(from)s);\"\n data = {\n 'quo': request.form['content'],\n 'auth': request.form['author'],\n\n 'from': session['id_users']\n }\n mysql.query_db(query, data)\n\n return redirect('/quotes')\n\[email protected]('/delete/<id>/<thing>')\ndef delete(id,thing):\n if session['id_users'] == int(thing):\n mysql = connectToMySQL(DATABASE)\n query = \"DELETE FROM quotes WHERE 
id_quotes = %(id)s;\"\n data = {\n 'id': id\n } \n mysql.query_db(query, data)\n return redirect('/quotes')\n else:\n flash(\"Unable to delete other's quotes\")\n return redirect('/quotes')\n\[email protected](\"/edit\")\ndef edit():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * From users WHERE id_users = %(id)s\"\n data ={ \n 'id' : session['id_users']\n }\n users_table = mysql.query_db(query, data)\n\n\n return render_template('edit_account.html', users = users_table)\n\[email protected](\"/update\", methods=[\"POST\"])\ndef update():\n is_valid = True\n if len(request.form['f_name']) < 3:\n \tis_valid = False\n \tflash(\"please enter your first name.\")\n if len(request.form['l_name']) < 3:\n \tis_valid = False\n \tflash(\"please enter your last name.\")\n if not EMAIL_REGEX.match(request.form['email']):\n flash(\"Invalid email address!\")\n if not is_valid:\n return redirect('/edit')\n else:\n flash(\"sucessfully updated\")\n mysql = connectToMySQL(DATABASE)\n query = \"UPDATE users Set first_name = %(fn)s, last_name = %(ln)s , email = %(em)s WHERE id_users = %(id)s;\"\n data = {\n \"fn\": request.form[\"f_name\"],\n \"ln\": request.form[\"l_name\"],\n \"em\": request.form[\"email\"],\n 'id' : session['id_users']\n }\n id = mysql.query_db(query, data)\n\n session['greeting'] = request.form['f_name'] \n return redirect('/quotes')\n\[email protected](\"/my_posts\")\ndef my_post():\n mysql = connectToMySQL(DATABASE)\n query = \"SELECT * FROM quotes WHERE from_user = %(id)s;\"\n data ={ \n 'id' : session['id_users']\n }\n my_quotes = mysql.query_db(query, data)\n\n return render_template('my_posts.html', quotes = my_quotes)\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect('/')\n\nif __name__==\"__main__\": \n app.run(debug=True) ",
"step-ids": [
9,
10,
12,
13,
15
]
}
|
[
9,
10,
12,
13,
15
] |
"""
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
Imported from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = ['resnext']
class ResNeXtBottleneck(nn.Module):
"""
    ResNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
D = cardinality * out_channels // widen_factor
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
    ResNeXt optimized for the CIFAR dataset, as specified in
    https://arxiv.org/pdf/1611.05431.pdf, extended with an attention branch:
    forward() returns the attention-branch logits, the main classifier logits,
    and the spatial attention map.
"""
def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
num_classes: number of classes
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.widen_factor = widen_factor
self.num_classes = num_classes
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(1024, num_classes)
self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)
self.bn_att = nn.BatchNorm2d(self.stages[3])
self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,
bias=False)
self.bn_att2 = nn.BatchNorm2d(num_classes)
self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,
bias=False)
self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,
bias=False)
self.bn_att3 = nn.BatchNorm2d(1)
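        # global average pooling over the 16x16 attention-branch maps (assumes 32x32 CIFAR-sized inputs)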
self.att_gap = nn.AvgPool2d(16)
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU(inplace=True)
        init.kaiming_normal_(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
                    init.kaiming_normal_(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
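        # attention branch: produce auxiliary class scores (ax) and a spatial attention map (self.att)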
ax = self.stage_att(x)
ax = self.relu(self.bn_att2(self.att_conv(ax)))
bs, cs, ys, xs = ax.shape
self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))
# self.att = self.att.view(bs, 1, ys, xs)
ax = self.att_conv2(ax)
ax = self.att_gap(ax)
ax = ax.view(ax.size(0), -1)
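        # reweight the shared features with the attention map, keeping a residual path (rx = x * att + x)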
rx = x * self.att
rx = rx + x
rx = self.stage_3.forward(rx)
rx = F.avg_pool2d(rx, 8, 1)
rx = rx.view(-1, 1024)
rx = self.classifier(rx)
return ax, rx, self.att
def resnext(**kwargs):
"""Constructs a ResNeXt.
"""
model = CifarResNeXt(**kwargs)
return model
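# A minimal usage sketch; the cardinality/depth values are illustrative choices,
# widen_factor=4 matches the hard-coded 1024-dim classifier, and the 32x32 input
# matches the CIFAR sizing assumed by att_gap's 16x16 pooling window.
if __name__ == '__main__':
    net = resnext(cardinality=8, depth=29, num_classes=10, widen_factor=4)
    ax, rx, att = net(torch.randn(2, 3, 32, 32))
    # attention-branch logits, classifier logits, and the spatial attention map
    print(ax.size(), rx.size(), att.size())  # [2, 10], [2, 10], [2, 1, 16, 16]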
# """
# resneXt for cifar with pytorch
# Reference:
# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. In CVPR, 2017
# """
#
# import torch
# import torch.nn as nn
# import math
#
#
# class Bottleneck(nn.Module):
# expansion = 4
#
# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):
# super(Bottleneck, self).__init__()
# D = int(planes * (baseWidth / 64.))
# C = cardinality
# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)
# self.bn1 = nn.BatchNorm2d(D * C)
# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)
# self.bn2 = nn.BatchNorm2d(D * C)
# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)
# self.bn3 = nn.BatchNorm2d(planes * 4)
# self.relu = nn.ReLU(inplace=True)
# self.downsample = downsample
# self.stride = stride
#
# def forward(self, x):
# residual = x
#
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
#
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
#
# out = self.conv3(out)
# out = self.bn3(out)
#
# if self.downsample is not None:
# residual = self.downsample(x)
#
# if residual.size() != out.size():
# print(out.size(), residual.size())
# out += residual
# out = self.relu(out)
#
# return out
#
#
# class ResNeXt_Cifar(nn.Module):
#
# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):
# super(ResNeXt_Cifar, self).__init__()
# self.inplanes = 64
# self.cardinality = cardinality
# self.baseWidth = baseWidth
# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
# self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True)
# self.layer1 = self._make_layer(block, 64, layers[0])
# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
# self.avgpool = nn.AvgPool2d(8, stride=1)
# self.fc = nn.Linear(256 * block.expansion, num_classes)
#
# for m in self.modules():
# if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
# elif isinstance(m, nn.BatchNorm2d):
# m.weight.data.fill_(1)
# m.bias.data.zero_()
#
# def _make_layer(self, block, planes, blocks, stride=1):
# downsample = None
# if stride != 1 or self.inplanes != planes * block.expansion:
# downsample = nn.Sequential(
# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
# nn.BatchNorm2d(planes * block.expansion)
# )
#
# layers = []
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))
# self.inplanes = planes * block.expansion
# for _ in range(1, blocks):
# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))
#
# return nn.Sequential(*layers)
#
# def forward(self, x):
# x = self.conv1(x)
# x = self.bn1(x)
# x = self.relu(x)
#
# x = self.layer1(x)
# x = self.layer2(x)
# x = self.layer3(x)
#
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
#
# return x
#
#
# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):
# assert (depth - 2) % 9 == 0
# n = int((depth - 2) / 9)
# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)
# return model
# if __name__ == '__main__':
# net = resneXt_cifar(29, 16, 64)
# y = net(torch.randn(1, 3, 32, 32))
# print(net)
# print(y.size())
|
normal
|
{
"blob_id": "50ed1512b0e6ff8e01f5d4aa034406fa78850176",
"index": 2293,
"step-1": "<mask token>\n\n\nclass CifarResNeXt(nn.Module):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ResNeXtBottleneck(nn.Module):\n <mask token>\n\n def __init__(self, in_channels, out_channels, stride, cardinality,\n widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=\n 1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,\n padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride\n =1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels,\n out_channels, kernel_size=1, stride=stride, padding=0, bias\n =False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(\n out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, cardinality, depth, num_classes, widen_factor=4,\n dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor,\n 256 * self.widen_factor]\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n self.stage_att = self.block('stage_att', self.stages[2], self.\n stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=\n 1, padding=0, bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1,\n padding=0, bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n init.kaiming_normal(self.classifier.weight)\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in 
key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels,\n out_channels, pool_stride, self.cardinality, self.\n widen_factor))\n else:\n block.add_module(name_, ResNeXtBottleneck(out_channels,\n out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n return ax, rx, self.att\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality,\n widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=\n 1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,\n padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride\n =1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels,\n out_channels, kernel_size=1, stride=stride, padding=0, bias\n =False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(\n out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, cardinality, depth, num_classes, widen_factor=4,\n dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor,\n 256 * self.widen_factor]\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n self.stage_att = self.block('stage_att', self.stages[2], self.\n stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=\n 1, padding=0, bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1,\n padding=0, bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n 
init.kaiming_normal(self.classifier.weight)\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels,\n out_channels, pool_stride, self.cardinality, self.\n widen_factor))\n else:\n block.add_module(name_, ResNeXtBottleneck(out_channels,\n out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n return ax, rx, self.att\n\n\ndef resnext(**kwargs):\n \"\"\"Constructs a ResNeXt.\n \"\"\"\n model = CifarResNeXt(**kwargs)\n return model\n",
"step-4": "<mask token>\n__all__ = ['resnext']\n\n\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n\n def __init__(self, in_channels, out_channels, stride, cardinality,\n widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=\n 1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride,\n padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride\n =1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels,\n out_channels, kernel_size=1, stride=stride, padding=0, bias\n =False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(\n out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n\n def __init__(self, cardinality, depth, num_classes, widen_factor=4,\n dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor,\n 256 * self.widen_factor]\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n self.stage_att = self.block('stage_att', self.stages[2], self.\n stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=\n 1, padding=0, bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1,\n padding=0, bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = 
nn.ReLU(inplace=True)\n init.kaiming_normal(self.classifier.weight)\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels,\n out_channels, pool_stride, self.cardinality, self.\n widen_factor))\n else:\n block.add_module(name_, ResNeXtBottleneck(out_channels,\n out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n return ax, rx, self.att\n\n\ndef resnext(**kwargs):\n \"\"\"Constructs a ResNeXt.\n \"\"\"\n model = CifarResNeXt(**kwargs)\n return model\n",
"step-5": "\n\"\"\"\nCreates a ResNeXt Model as defined in:\nXie, S., Girshick, R., Dollar, P., Tu, Z., & He, K. (2016).\nAggregated residual transformations for deep neural networks.\narXiv preprint arXiv:1611.05431.\nimport from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py\n\"\"\"\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\n__all__ = ['resnext']\n\nclass ResNeXtBottleneck(nn.Module):\n \"\"\"\n RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)\n \"\"\"\n def __init__(self, in_channels, out_channels, stride, cardinality, widen_factor):\n \"\"\" Constructor\n Args:\n in_channels: input channel dimensionality\n out_channels: output channel dimensionality\n stride: conv stride. Replaces pooling layer.\n cardinality: num of convolution groups.\n widen_factor: factor to reduce the input dimensionality before convolution.\n \"\"\"\n super(ResNeXtBottleneck, self).__init__()\n D = cardinality * out_channels // widen_factor\n self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_reduce = nn.BatchNorm2d(D)\n self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)\n self.bn = nn.BatchNorm2d(D)\n self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.bn_expand = nn.BatchNorm2d(out_channels)\n\n self.shortcut = nn.Sequential()\n if in_channels != out_channels:\n self.shortcut.add_module('shortcut_conv', nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False))\n self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))\n\n def forward(self, x):\n bottleneck = self.conv_reduce.forward(x)\n bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)\n bottleneck = self.conv_conv.forward(bottleneck)\n bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)\n bottleneck = self.conv_expand.forward(bottleneck)\n bottleneck = self.bn_expand.forward(bottleneck)\n residual = self.shortcut.forward(x)\n return F.relu(residual + bottleneck, inplace=True)\n\n\nclass CifarResNeXt(nn.Module):\n \"\"\"\n ResNext optimized for the Cifar dataset, as specified in\n https://arxiv.org/pdf/1611.05431.pdf\n \"\"\"\n def __init__(self, cardinality, depth, num_classes, widen_factor=4, dropRate=0):\n \"\"\" Constructor\n Args:\n cardinality: number of convolution groups.\n depth: number of layers.\n num_classes: number of classes\n widen_factor: factor to adjust the channel dimensionality\n \"\"\"\n super(CifarResNeXt, self).__init__()\n self.cardinality = cardinality\n self.depth = depth\n self.block_depth = (self.depth - 2) // 9\n self.widen_factor = widen_factor\n self.num_classes = num_classes\n self.output_size = 64\n self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]\n\n self.conv_1_3x3 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)\n self.bn_1 = nn.BatchNorm2d(64)\n self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)\n self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)\n self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)\n self.classifier = nn.Linear(1024, num_classes)\n\n self.stage_att = self.block('stage_att', self.stages[2], self.stages[3], 1)\n self.bn_att = nn.BatchNorm2d(self.stages[3])\n self.att_conv = nn.Conv2d(self.stages[3], num_classes, kernel_size=1, padding=0,\n 
bias=False)\n self.bn_att2 = nn.BatchNorm2d(num_classes)\n self.att_conv2 = nn.Conv2d(num_classes, num_classes, kernel_size=1, padding=0,\n bias=False)\n self.att_conv3 = nn.Conv2d(num_classes, 1, kernel_size=3, padding=1,\n bias=False)\n self.bn_att3 = nn.BatchNorm2d(1)\n self.att_gap = nn.AvgPool2d(16)\n self.sigmoid = nn.Sigmoid()\n self.relu = nn.ReLU(inplace=True)\n\n init.kaiming_normal(self.classifier.weight)\n\n for key in self.state_dict():\n if key.split('.')[-1] == 'weight':\n if 'conv' in key:\n init.kaiming_normal(self.state_dict()[key], mode='fan_out')\n if 'bn' in key:\n self.state_dict()[key][...] = 1\n elif key.split('.')[-1] == 'bias':\n self.state_dict()[key][...] = 0\n\n def block(self, name, in_channels, out_channels, pool_stride=2):\n \"\"\" Stack n bottleneck modules where n is inferred from the depth of the network.\n Args:\n name: string name of the current block.\n in_channels: number of input channels\n out_channels: number of output channels\n pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.\n Returns: a Module consisting of n sequential bottlenecks.\n \"\"\"\n block = nn.Sequential()\n for bottleneck in range(self.block_depth):\n name_ = '%s_bottleneck_%d' % (name, bottleneck)\n if bottleneck == 0:\n block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,\n self.widen_factor))\n else:\n block.add_module(name_,\n ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.widen_factor))\n return block\n\n def forward(self, x):\n x = self.conv_1_3x3.forward(x)\n x = F.relu(self.bn_1.forward(x), inplace=True)\n x = self.stage_1.forward(x)\n x = self.stage_2.forward(x)\n\n ax = self.stage_att(x)\n ax = self.relu(self.bn_att2(self.att_conv(ax)))\n bs, cs, ys, xs = ax.shape\n self.att = self.sigmoid(self.bn_att3(self.att_conv3(ax)))\n # self.att = self.att.view(bs, 1, ys, xs)\n ax = self.att_conv2(ax)\n ax = self.att_gap(ax)\n ax = ax.view(ax.size(0), -1)\n\n rx = x * self.att\n rx = rx + x\n rx = self.stage_3.forward(rx)\n rx = F.avg_pool2d(rx, 8, 1)\n rx = rx.view(-1, 1024)\n rx = self.classifier(rx)\n\n return ax, rx, self.att\n\ndef resnext(**kwargs):\n \"\"\"Constructs a ResNeXt.\n \"\"\"\n model = CifarResNeXt(**kwargs)\n return model\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# \"\"\"\n# resneXt for cifar with pytorch\n# Reference:\n# [1] S. Xie, G. Ross, P. Dollar, Z. Tu and K. He Aggregated residual transformations for deep neural networks. 
In CVPR, 2017\n# \"\"\"\n#\n# import torch\n# import torch.nn as nn\n# import math\n#\n#\n# class Bottleneck(nn.Module):\n# expansion = 4\n#\n# def __init__(self, inplanes, planes, cardinality, baseWidth, stride=1, downsample=None):\n# super(Bottleneck, self).__init__()\n# D = int(planes * (baseWidth / 64.))\n# C = cardinality\n# self.conv1 = nn.Conv2d(inplanes, D * C, kernel_size=1, bias=False)\n# self.bn1 = nn.BatchNorm2d(D * C)\n# self.conv2 = nn.Conv2d(D * C, D * C, kernel_size=3, stride=stride, padding=1, groups=C, bias=False)\n# self.bn2 = nn.BatchNorm2d(D * C)\n# self.conv3 = nn.Conv2d(D * C, planes * 4, kernel_size=1, bias=False)\n# self.bn3 = nn.BatchNorm2d(planes * 4)\n# self.relu = nn.ReLU(inplace=True)\n# self.downsample = downsample\n# self.stride = stride\n#\n# def forward(self, x):\n# residual = x\n#\n# out = self.conv1(x)\n# out = self.bn1(out)\n# out = self.relu(out)\n#\n# out = self.conv2(out)\n# out = self.bn2(out)\n# out = self.relu(out)\n#\n# out = self.conv3(out)\n# out = self.bn3(out)\n#\n# if self.downsample is not None:\n# residual = self.downsample(x)\n#\n# if residual.size() != out.size():\n# print(out.size(), residual.size())\n# out += residual\n# out = self.relu(out)\n#\n# return out\n#\n#\n# class ResNeXt_Cifar(nn.Module):\n#\n# def __init__(self, block, layers, cardinality, baseWidth, num_classes=10):\n# super(ResNeXt_Cifar, self).__init__()\n# self.inplanes = 64\n# self.cardinality = cardinality\n# self.baseWidth = baseWidth\n# self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n# self.bn1 = nn.BatchNorm2d(64)\n# self.relu = nn.ReLU(inplace=True)\n# self.layer1 = self._make_layer(block, 64, layers[0])\n# self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n# self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n# self.avgpool = nn.AvgPool2d(8, stride=1)\n# self.fc = nn.Linear(256 * block.expansion, num_classes)\n#\n# for m in self.modules():\n# if isinstance(m, nn.Conv2d):\n# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n# m.weight.data.normal_(0, math.sqrt(2. / n))\n# elif isinstance(m, nn.BatchNorm2d):\n# m.weight.data.fill_(1)\n# m.bias.data.zero_()\n#\n# def _make_layer(self, block, planes, blocks, stride=1):\n# downsample = None\n# if stride != 1 or self.inplanes != planes * block.expansion:\n# downsample = nn.Sequential(\n# nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\n# nn.BatchNorm2d(planes * block.expansion)\n# )\n#\n# layers = []\n# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth, stride, downsample))\n# self.inplanes = planes * block.expansion\n# for _ in range(1, blocks):\n# layers.append(block(self.inplanes, planes, self.cardinality, self.baseWidth))\n#\n# return nn.Sequential(*layers)\n#\n# def forward(self, x):\n# x = self.conv1(x)\n# x = self.bn1(x)\n# x = self.relu(x)\n#\n# x = self.layer1(x)\n# x = self.layer2(x)\n# x = self.layer3(x)\n#\n# x = self.avgpool(x)\n# x = x.view(x.size(0), -1)\n# x = self.fc(x)\n#\n# return x\n#\n#\n# def resneXt_cifar(depth, cardinality, baseWidth, **kwargs):\n# assert (depth - 2) % 9 == 0\n# n = int((depth - 2) / 9)\n# model = ResNeXt_Cifar(Bottleneck, [n, n, n], cardinality, baseWidth, **kwargs)\n# return model\n\n\n# if __name__ == '__main__':\n# net = resneXt_cifar(29, 16, 64)\n# y = net(torch.randn(1, 3, 32, 32))\n# print(net)\n# print(y.size())",
"step-ids": [
1,
8,
10,
11,
13
]
}
|
[
1,
8,
10,
11,
13
] |
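A minimal usage sketch for the attention-branch CifarResNeXt stored in the row above (illustrative, not part of the dataset row): it assumes PyTorch is installed and that the step-5 code is saved as model.py, and the hyperparameters below are chosen only so the hardcoded Linear(1024, num_classes) head matches (widen_factor=4). Note that init.kaiming_normal is the deprecated spelling of init.kaiming_normal_ in current PyTorch, so constructing the model may emit deprecation warnings.

import torch
from model import CifarResNeXt   # assumed module path for the step-5 code above

net = CifarResNeXt(cardinality=8, depth=29, num_classes=10, widen_factor=4)
x = torch.randn(2, 3, 32, 32)            # CIFAR-sized batch (assumed input size)
ax, rx, att = net(x)                     # attention-branch logits, main logits, attention map
print(ax.shape, rx.shape, att.shape)     # expected: (2, 10), (2, 10), (2, 1, 16, 16)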
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
'Give the length of each side in order to compute the area of a triangle.')
<|reserved_special_token_0|>
print('The triangle area is:', triangleArea)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(
'Give the length of each side in order to compute the area of a triangle.')
lenA = float(input('Give the length of side A:'))
lenB = float(input('Give the length of side B:'))
lenC = float(input('Give the length of side C:'))
triangleArea = 1 / 4 * math.sqrt((lenA + lenB + lenC) * (-lenA + lenB +
lenC) * (lenA - lenB + lenC) * (lenA + lenB - lenC))
print('The triangle area is:', triangleArea)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import math
print(
'Give the length of each side in order to compute the area of a triangle.')
lenA = float(input('Give the length of side A:'))
lenB = float(input('Give the length of side B:'))
lenC = float(input('Give the length of side C:'))
triangleArea = 1 / 4 * math.sqrt((lenA + lenB + lenC) * (-lenA + lenB +
lenC) * (lenA - lenB + lenC) * (lenA + lenB - lenC))
print('The triangle area is:', triangleArea)
<|reserved_special_token_1|>
"""
Exercise 3 from the Python tutorial Part 1 on:
https://codeandwork.github.io/courses/prep/pythonTutorial1.html
"""
import math
print("Give the length of each side in order to compute the area of a triangle.")
lenA = float(input("Give the length of side A:"))
lenB = float(input("Give the length of side B:"))
lenC = float(input("Give the length of side C:"))
triangleArea = (1/4) * math.sqrt((lenA+lenB+lenC) * (-lenA+lenB+lenC) * (lenA-lenB+lenC) * (lenA+lenB-lenC))
print("The triangle area is:", triangleArea)
|
flexible
|
{
"blob_id": "398cb05218a9772a0b62fdfbacc465b26427827d",
"index": 2854,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\n 'Give the length of each side in order to compute the area of a triangle.')\n<mask token>\nprint('The triangle area is:', triangleArea)\n",
"step-3": "<mask token>\nprint(\n 'Give the length of each side in order to compute the area of a triangle.')\nlenA = float(input('Give the length of side A:'))\nlenB = float(input('Give the length of side B:'))\nlenC = float(input('Give the length of side C:'))\ntriangleArea = 1 / 4 * math.sqrt((lenA + lenB + lenC) * (-lenA + lenB +\n lenC) * (lenA - lenB + lenC) * (lenA + lenB - lenC))\nprint('The triangle area is:', triangleArea)\n",
"step-4": "<mask token>\nimport math\nprint(\n 'Give the length of each side in order to compute the area of a triangle.')\nlenA = float(input('Give the length of side A:'))\nlenB = float(input('Give the length of side B:'))\nlenC = float(input('Give the length of side C:'))\ntriangleArea = 1 / 4 * math.sqrt((lenA + lenB + lenC) * (-lenA + lenB +\n lenC) * (lenA - lenB + lenC) * (lenA + lenB - lenC))\nprint('The triangle area is:', triangleArea)\n",
"step-5": "\"\"\"\n Exercise 3 from the Python tutorial Part 1 on:\n https://codeandwork.github.io/courses/prep/pythonTutorial1.html\n\"\"\"\n\nimport math\n\nprint(\"Give the length of each side in order to compute the area of a triangle.\")\nlenA = float(input(\"Give the length of side A:\"))\nlenB = float(input(\"Give the length of side B:\"))\nlenC = float(input(\"Give the length of side C:\"))\n\ntriangleArea = (1/4) * math.sqrt((lenA+lenB+lenC) * (-lenA+lenB+lenC) * (lenA-lenB+lenC) * (lenA+lenB-lenC))\n\nprint(\"The triangle area is:\", triangleArea)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
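The script stored in the row above is Heron's formula with the semi-perimeter multiplied out: area = (1/4) * sqrt((a+b+c) * (-a+b+c) * (a-b+c) * (a+b-c)). A self-contained sanity check with illustrative values (a 3-4-5 right triangle has area 6):

import math

a, b, c = 3.0, 4.0, 5.0
area = 0.25 * math.sqrt((a + b + c) * (-a + b + c) * (a - b + c) * (a + b - c))
assert abs(area - 6.0) < 1e-9   # base * height / 2 = 3 * 4 / 2 = 6
print('area =', area)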
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def my_add(a, b):
return a + b
<|reserved_special_token_1|>
import sys
import os
def my_add(a, b):
return a + b
|
flexible
|
{
"blob_id": "cc81e13bba0ea0186966bce7f5aac05bb106e971",
"index": 5935,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_add(a, b):\n return a + b\n",
"step-3": "import sys\nimport os\n\n\ndef my_add(a, b):\n return a + b\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from .utils import parse_query_parameters
class CollectionMixin(with_metaclass(ABCMeta, object)):
@abstractmethod
def list(self, size=100, offset=None, **filter_fields):
"""
:param size: A limit on the number of objects to be returned.
:type size: int
:param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.
:type offset: uuid.UUID
:param filter_fields: Dictionary containing values to filter for
:type filter_fields: dict
:rtype: dict
:return: Dictionary containing dictionaries
"""
def iterate(self, window_size=10, **filter_fields):
current_offset = None
while True:
response = self.list(size=window_size, offset=current_offset, **filter_fields)
for item in response['data']:
yield item
next_url = response.get('next', None)
if next_url is None:
return
current_offset = parse_query_parameters(next_url).get('offset')[0]
|
normal
|
{
"blob_id": "b63ed9e09b9e8c539aff765d719f3610283663fe",
"index": 4496,
"step-1": "<mask token>\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n <mask token>\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset,\n **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-3": "<mask token>\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n\n @abstractmethod\n def list(self, size=100, offset=None, **filter_fields):\n \"\"\"\n :param size: A limit on the number of objects to be returned.\n :type size: int\n :param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.\n :type offset: uuid.UUID\n :param filter_fields: Dictionary containing values to filter for\n :type filter_fields: dict\n :rtype: dict\n :return: Dictionary containing dictionaries\n \"\"\"\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset,\n **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-4": "from __future__ import unicode_literals, print_function\nfrom abc import ABCMeta, abstractmethod\nfrom six import with_metaclass\nfrom .utils import parse_query_parameters\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n\n @abstractmethod\n def list(self, size=100, offset=None, **filter_fields):\n \"\"\"\n :param size: A limit on the number of objects to be returned.\n :type size: int\n :param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.\n :type offset: uuid.UUID\n :param filter_fields: Dictionary containing values to filter for\n :type filter_fields: dict\n :rtype: dict\n :return: Dictionary containing dictionaries\n \"\"\"\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset,\n **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import with_metaclass\n\nfrom .utils import parse_query_parameters\n\n\nclass CollectionMixin(with_metaclass(ABCMeta, object)):\n @abstractmethod\n def list(self, size=100, offset=None, **filter_fields):\n \"\"\"\n :param size: A limit on the number of objects to be returned.\n :type size: int\n :param offset: A cursor used for pagination. offset is an object identifier that defines a place in the list.\n :type offset: uuid.UUID\n :param filter_fields: Dictionary containing values to filter for\n :type filter_fields: dict\n :rtype: dict\n :return: Dictionary containing dictionaries\n \"\"\"\n\n def iterate(self, window_size=10, **filter_fields):\n current_offset = None\n while True:\n response = self.list(size=window_size, offset=current_offset, **filter_fields)\n for item in response['data']:\n yield item\n next_url = response.get('next', None)\n if next_url is None:\n return\n current_offset = parse_query_parameters(next_url).get('offset')[0]\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
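CollectionMixin.iterate in the row above relies on a specific contract: list() must return a dict with a 'data' list and an optional 'next' URL whose query string carries the offset of the following page. A self-contained sketch of that contract, with fake_list as an illustrative backend and urllib.parse.parse_qs standing in for parse_query_parameters (an assumption about how that helper behaves, based on how the mixin indexes its result):

from urllib.parse import urlparse, parse_qs

ITEMS = list(range(25))

def fake_list(size=100, offset=None):
    # pages through ITEMS and advertises the offset of the next page, if any
    start = 0 if offset is None else int(offset)
    page = ITEMS[start:start + size]
    next_url = '/collection?offset=%d' % (start + size) if start + size < len(ITEMS) else None
    return {'data': page, 'next': next_url}

def iterate(window_size=10):
    # mirrors CollectionMixin.iterate, minus the six/ABC machinery
    offset = None
    while True:
        response = fake_list(size=window_size, offset=offset)
        for item in response['data']:
            yield item
        if response['next'] is None:
            return
        offset = parse_qs(urlparse(response['next']).query)['offset'][0]

assert list(iterate()) == ITEMS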
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')
SQLALCHEMY_ECHO = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 8
CSRF_ENABLED = True
CSRF_SESSION_KEY = (
'8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A')
UPLOAD_FOLDER = '%s/images'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
<|reserved_special_token_1|>
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')
SQLALCHEMY_ECHO = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 8
CSRF_ENABLED = True
CSRF_SESSION_KEY = (
'8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A')
UPLOAD_FOLDER = '%s/images'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
<|reserved_special_token_1|>
import os
_basedir = os.path.abspath(os.path.dirname(__file__))
DEBUG = True
SECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')
SQLALCHEMY_ECHO = True
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 8
CSRF_ENABLED = True
CSRF_SESSION_KEY = '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A'
UPLOAD_FOLDER = '%s/images'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
|
flexible
|
{
"blob_id": "6ee71cf61ae6a79ec0cd06f1ddc7dc614a76c7b9",
"index": 6547,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n_basedir = os.path.abspath(os.path.dirname(__file__))\nDEBUG = True\nSECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')\nSQLALCHEMY_ECHO = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 8\nCSRF_ENABLED = True\nCSRF_SESSION_KEY = (\n '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A')\nUPLOAD_FOLDER = '%s/images'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n",
"step-3": "import os\n_basedir = os.path.abspath(os.path.dirname(__file__))\nDEBUG = True\nSECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')\nSQLALCHEMY_ECHO = True\nDATABASE_CONNECT_OPTIONS = {}\nTHREADS_PER_PAGE = 8\nCSRF_ENABLED = True\nCSRF_SESSION_KEY = (\n '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A')\nUPLOAD_FOLDER = '%s/images'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n",
"step-4": "import os\n_basedir = os.path.abspath(os.path.dirname(__file__))\n\nDEBUG = True\n\nSECRET_KEY = '06A52C5B30EC2960310B45E4E0FF21C5D6C86C47D91FE19FA5934EFF445276A0'\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(_basedir, 'app.db')\nSQLALCHEMY_ECHO = True\nDATABASE_CONNECT_OPTIONS = {}\n\nTHREADS_PER_PAGE = 8\n\nCSRF_ENABLED = True\nCSRF_SESSION_KEY = '8C371D8166DA8A9F770DAB562878BDD8704F079BB735D607CE8E2C507D55359A'\n\nUPLOAD_FOLDER = '%s/images'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
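The row above looks like a Flask-style settings module (SQLAlchemy URI, CSRF keys, upload whitelist). A hedged sketch of how such a module is typically consumed, assuming it is saved as config.py next to the application (the file name and application layout are assumptions; Flask's from_object accepting an import string is standard API):

from flask import Flask

app = Flask(__name__)
app.config.from_object('config')                 # copies the UPPERCASE names from config.py
print(app.config['SQLALCHEMY_DATABASE_URI'])
print(app.config['ALLOWED_EXTENSIONS'])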
<|reserved_special_token_0|>
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
<|reserved_special_token_0|>
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
<|reserved_special_token_0|>
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
<|reserved_special_token_0|>
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
<|reserved_special_token_0|>
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
<|reserved_special_token_0|>
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
<|reserved_special_token_0|>
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.
HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
<|reserved_special_token_0|>
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (
'100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda
w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.
sample else 1.0)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
<|reserved_special_token_0|>
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
def norm_histos_to_first_bin(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield norm_to_first_bin(wrp)
else:
yield wrp
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.
HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
<|reserved_special_token_0|>
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (
'100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda
w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.
sample else 1.0)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
<|reserved_special_token_0|>
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ROOT.gROOT.SetBatch()
ROOT.gROOT.ProcessLine('gErrorIgnoreLevel = kError;')
<|reserved_special_token_0|>
dirname = 'VLQToHiggsPairProd'
varial.settings.rootfile_postfixes = ['.png', '.pdf']
varial.settings.git_tag = varial.settings.readgittag('./GITTAGGER_LOG.txt')
current_tag = varial.settings.git_tag
smpls = list()
smpls.append(Sample(name='QCD', legend='QCD'))
smpls.append(Sample(name='TTJets', legend='TTJets'))
smpls.append(Sample(name='WJets', legend='WJets'))
smpls.append(Sample(name='ZJets', legend='ZJets'))
analysis.all_samples = dict((s.name, s) for s in smpls)
varial.settings.defaults_Legend['x_pos'] = 0.8
varial.settings.defaults_Legend['label_width'] = 0.36
varial.settings.defaults_Legend['label_height'] = 0.03
varial.settings.box_text_size = 0.03
varial.settings.colors = {'TTJets': 632, 'WJets': 878, 'ZJets': 596,
'TpTp_M1000': 870}
current_cuts = ['AfterPresel', 'FullSelection']
current_hists = ['/EventHists', '/MuonHists']
use_cuts = False
use_histos = False
varial.settings.stacking_order = ['ZJets', 'WJets', 'TTJets']
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
def norm_histos_to_first_bin(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield norm_to_first_bin(wrp)
else:
yield wrp
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.
HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
def norm_cf_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = norm_histos_to_first_bin(wrps)
wrps = label_axes(wrps)
return wrps
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (
'100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda
w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.
sample else 1.0)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
def stack_histos_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_stacked_hook
kws['plot_setup'] = gen.mc_stack_n_data_sum
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
tagger = varial.tools.GitTagger('./GITTAGGER_LOG.txt')
tagger.run()
p1 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),
filter_keyfunc=select_histograms, plotter_factory=stack_histos_factory,
combine_files=True)
p2 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.
endswith('raw'), plotter_factory=norm_cf_factory, combine_files=True)
p3 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.
endswith('raw'), plotter_factory=do_nothing_factory, combine_files=True)
p4 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
pattern='v1.19_unmerged_files/*.root', filter_keyfunc=
select_splithistograms, plotter_factory=for_eff_factory, combine_files=
False)
p5 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
filter_keyfunc=select_splithistograms, plotter_factory=for_eff_factory,
combine_files=True)
p6 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
pattern='v1.19_unmerged_files/*.root', filter_keyfunc=lambda w: w.name.
startswith('cf_') and not w.name.endswith('raw'), plotter_factory=
norm_cf_factory, combine_files=False)
p7 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
pattern='v1.19_unmerged_files/*.root', filter_keyfunc=lambda w: w.name.
startswith('cf_') and not w.name.endswith('raw'), plotter_factory=
do_nothing_factory, combine_files=False)
time.sleep(1)
p1.run()
p2.run()
p3.run()
p5.run()
varial.tools.WebCreator().run()
<|reserved_special_token_1|>
#!/usr/bin/env python
import ROOT
ROOT.gROOT.SetBatch()
ROOT.gROOT.ProcessLine('gErrorIgnoreLevel = kError;')
import os
import time
import varial.tools
import varial.generators as gen
import itertools
from varial.sample import Sample
import varial.analysis as analysis
# import varial.toolinterface
dirname = 'VLQToHiggsPairProd'
varial.settings.rootfile_postfixes = ['.png','.pdf']
varial.settings.git_tag = varial.settings.readgittag('./GITTAGGER_LOG.txt')
current_tag = varial.settings.git_tag
# sample definitions
smpls = list()
smpls.append(Sample(
name='QCD',
legend='QCD'
))
smpls.append(Sample(
name='TTJets',
legend='TTJets'
))
smpls.append(Sample(
name='WJets',
legend='WJets'
))
smpls.append(Sample(
name='ZJets',
legend='ZJets'
))
analysis.all_samples = dict((s.name, s) for s in smpls)
varial.settings.defaults_Legend['x_pos'] = 0.80
varial.settings.defaults_Legend['label_width'] = 0.36
varial.settings.defaults_Legend['label_height'] = 0.03
# varial.settings.debug_mode = True
varial.settings.box_text_size = 0.03
varial.settings.colors = {
'TTJets': 632,
'WJets': 878,
'ZJets': 596,
'TpTp_M1000': 870,
# 'TpJ_TH_M800_NonTlep': 434,
}
# SELECT HISTOGRAMS TO PLOT HERE!
# use these functions to specifically select histograms for plotting
current_cuts = ['AfterPresel', 'FullSelection'] # 'Nminus1-MuonPtCut', 'OneCut-HTCut', 'FullSelection', 'Nminus1-6OneHiggsTagCut'
current_hists = ['/EventHists', '/MuonHists'] # "/ElectronHists", '/MuonHists', '/JetHists', '/TopJetHists', '/EventHists', '/GenHists/w_decay_lin', '/GenHists/w_decay_log'
use_cuts = False
use_histos = False
varial.settings.stacking_order = ['ZJets', 'WJets', 'TTJets']
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-'+c not in wrp.in_file_path for c in current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
# if ('GenHists' in wrp.in_file_path and not (wrp.name.startswith('mu_') or wrp.name.startswith('genjet_'))):
# use_this = False
# if 'GenHists' in wrp.in_file_path and ('NoCuts' not in wrp.in_file_path and 'Nminus1-BTagCut' not in wrp.in_file_path):
# use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-'+c not in wrp.in_file_path for c in current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
# if ('GenHists' in wrp.in_file_path and not (wrp.name.startswith('mu_') or wrp.name.startswith('genjet_'))):
# use_this = False
# if 'GenHists' in wrp.in_file_path and ('NoCuts' not in wrp.in_file_path and 'Nminus1-BTagCut' not in wrp.in_file_path):
# use_this = False
return use_this
# SOME FUNCTIONS TO MANIPULATE HISTOGRAMS
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1. / firstbin)
info = wrp.all_info()
info["lumi"] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
def norm_histos_to_first_bin(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield norm_to_first_bin(wrp)
else:
yield wrp
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
# HOOK FUNCTIONS FOR PLOTTER_FACTORIES; manipulate histograms here
def for_stacked_hook(wrps):
# wrps = norm_cf_plots(wrps)
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(
wrps,
sample=lambda w: w.file_path.split('.')[-2],
analyzer=lambda w: w.in_file_path[0],
legend=lambda w: w.sample,
is_signal=lambda w: 'TpTp_M' in w.sample,
lumi=lambda w: 1.
)
# wrps = gen.imap_conditional(wrps, lambda w: 'TpJ_TH_M800' in w.sample, gen.op.norm_to_lumi)
wrps = label_axes(wrps)
return wrps
def norm_cf_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = norm_histos_to_first_bin(wrps)
wrps = label_axes(wrps)
return wrps
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(
wrps,
sample=lambda w: w.file_path.split('.')[-2],
analyzer=lambda w: w.in_file_path[0],
legend=lambda w: ('100* ' if 'TpTp_M' in w.sample else '') + w.sample,
is_signal=lambda w: 'TpTp_M' in w.sample,
lumi=lambda w: 0.01 if 'TpTp_M' in w.sample else 1.
)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
# def calc_stack_order(wrps):
# for w in wrps:
# def stack_by_max(wrps):
# wrps = calc_stack_order(wrps)
# wrps = gen.mc_stack_n_data_sum(wrps)
# return wrps
# PLOTTER FACTORIES; select here in general which histograms to plot, how to manipulate them a.s.o.
def stack_histos_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_stacked_hook
kws['plot_setup'] = gen.mc_stack_n_data_sum
kws['save_lin_log_scale'] = True
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def norm_cf_factory(**kws):
# kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w : w.name + '_norm'
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
# kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def create_name(name):
return name+'v'+varial.settings.git_tag
tagger = varial.tools.GitTagger('./GITTAGGER_LOG.txt')
tagger.run()
p1 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname),
# filter_keyfunc=lambda w: not w.name.startswith('cf_'),
filter_keyfunc=select_histograms,
plotter_factory=stack_histos_factory,
combine_files=True
)
p2 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=norm_cf_factory,
combine_files=True
)
p3 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=do_nothing_factory,
combine_files=True
)
p4 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
pattern='v1.19_unmerged_files/*.root',
filter_keyfunc=select_splithistograms,
plotter_factory=for_eff_factory,
combine_files=False
)
p5 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
# filter_keyfunc=lambda w: not w.name.startswith('cf_'),
filter_keyfunc=select_splithistograms,
plotter_factory=for_eff_factory,
combine_files=True
)
p6 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
pattern='v1.19_unmerged_files/*.root',
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=norm_cf_factory,
combine_files=False
)
p7 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
pattern='v1.19_unmerged_files/*.root',
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=do_nothing_factory,
combine_files=False
)
time.sleep(1)
p1.run()
p2.run()
p3.run()
# p4.run()
p5.run()
# p6.run()
# p7.run()
varial.tools.WebCreator().run()
# os.system('rm -r ~/www/TprimeAnalysis/%s' % create_name(dirname))
# os.system('cp -r %s ~/www/TprimeAnalysis/' % create_name(dirname))
|
flexible
|
{
"blob_id": "05ced056bf2f59f85bef82e53803e7df7ff8c8df",
"index": 1156,
"step-1": "<mask token>\n\n\ndef select_histograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in\n current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n return use_this\n\n\n<mask token>\n\n\ndef norm_to_first_bin(wrp):\n histo = wrp.histo.Clone()\n firstbin = histo.GetBinContent(1)\n histo.Scale(1.0 / firstbin)\n info = wrp.all_info()\n info['lumi'] /= firstbin\n return varial.wrappers.HistoWrapper(histo, **info)\n\n\n<mask token>\n\n\ndef norm_histos_to_integral(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield varial.operations.norm_to_integral(wrp)\n else:\n yield wrp\n\n\ndef label_axes(wrps):\n for w in wrps:\n if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':\n w.histo.GetXaxis().SetTitle(w.histo.GetTitle())\n w.histo.GetYaxis().SetTitle('events')\n w.histo.SetTitle('')\n yield w\n\n\n<mask token>\n\n\ndef for_stacked_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(\n '.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.\n sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)\n wrps = label_axes(wrps)\n return wrps\n\n\n<mask token>\n\n\ndef do_nothing_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\n<mask token>\n\n\ndef norm_cf_factory(**kws):\n kws['hook_loaded_histos'] = norm_cf_hook\n kws['save_lin_log_scale'] = True\n kws['save_name_func'] = lambda w: w.name + '_norm'\n return varial.tools.Plotter(**kws)\n\n\ndef do_nothing_factory(**kws):\n kws['hook_loaded_histos'] = do_nothing_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef for_eff_factory(**kws):\n kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = for_eff_plots_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef create_name(name):\n return name + 'v' + varial.settings.git_tag\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef select_histograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in\n current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n return use_this\n\n\ndef select_splithistograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in\n current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n return use_this\n\n\ndef norm_to_first_bin(wrp):\n histo = wrp.histo.Clone()\n firstbin = histo.GetBinContent(1)\n histo.Scale(1.0 / firstbin)\n info = wrp.all_info()\n info['lumi'] /= firstbin\n return varial.wrappers.HistoWrapper(histo, **info)\n\n\n<mask token>\n\n\ndef norm_histos_to_integral(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield varial.operations.norm_to_integral(wrp)\n else:\n yield wrp\n\n\ndef label_axes(wrps):\n for w in wrps:\n if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':\n w.histo.GetXaxis().SetTitle(w.histo.GetTitle())\n w.histo.GetYaxis().SetTitle('events')\n w.histo.SetTitle('')\n yield w\n\n\ndef norm_cf_plots(wrps):\n for w in wrps:\n if w.name.startswith('cf_') and isinstance(w, varial.wrappers.\n HistoWrapper):\n yield varial.operations.norm_to_integral(w)\n else:\n yield w\n\n\ndef for_stacked_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(\n '.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.\n sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)\n wrps = label_axes(wrps)\n return wrps\n\n\n<mask token>\n\n\ndef do_nothing_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\ndef for_eff_plots_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(\n '.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (\n '100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda\n w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.\n sample else 1.0)\n wrps = gen.gen_make_eff_graphs(wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\n<mask token>\n\n\ndef norm_cf_factory(**kws):\n kws['hook_loaded_histos'] = norm_cf_hook\n kws['save_lin_log_scale'] = True\n kws['save_name_func'] = lambda w: w.name + '_norm'\n return varial.tools.Plotter(**kws)\n\n\ndef do_nothing_factory(**kws):\n kws['hook_loaded_histos'] = do_nothing_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef for_eff_factory(**kws):\n kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = for_eff_plots_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef create_name(name):\n return name + 'v' + varial.settings.git_tag\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef select_histograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in\n current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n return use_this\n\n\ndef select_splithistograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in\n current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n return use_this\n\n\ndef norm_to_first_bin(wrp):\n histo = wrp.histo.Clone()\n firstbin = histo.GetBinContent(1)\n histo.Scale(1.0 / firstbin)\n info = wrp.all_info()\n info['lumi'] /= firstbin\n return varial.wrappers.HistoWrapper(histo, **info)\n\n\ndef norm_histos_to_first_bin(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield norm_to_first_bin(wrp)\n else:\n yield wrp\n\n\ndef norm_histos_to_integral(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield varial.operations.norm_to_integral(wrp)\n else:\n yield wrp\n\n\ndef label_axes(wrps):\n for w in wrps:\n if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':\n w.histo.GetXaxis().SetTitle(w.histo.GetTitle())\n w.histo.GetYaxis().SetTitle('events')\n w.histo.SetTitle('')\n yield w\n\n\ndef norm_cf_plots(wrps):\n for w in wrps:\n if w.name.startswith('cf_') and isinstance(w, varial.wrappers.\n HistoWrapper):\n yield varial.operations.norm_to_integral(w)\n else:\n yield w\n\n\ndef for_stacked_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(\n '.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.\n sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)\n wrps = label_axes(wrps)\n return wrps\n\n\n<mask token>\n\n\ndef do_nothing_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\ndef for_eff_plots_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(\n '.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (\n '100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda\n w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.\n sample else 1.0)\n wrps = gen.gen_make_eff_graphs(wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\n<mask token>\n\n\ndef norm_cf_factory(**kws):\n kws['hook_loaded_histos'] = norm_cf_hook\n kws['save_lin_log_scale'] = True\n kws['save_name_func'] = lambda w: w.name + '_norm'\n return varial.tools.Plotter(**kws)\n\n\ndef do_nothing_factory(**kws):\n kws['hook_loaded_histos'] = do_nothing_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef for_eff_factory(**kws):\n kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = for_eff_plots_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef create_name(name):\n return name + 'v' + varial.settings.git_tag\n\n\n<mask token>\n",
"step-4": "<mask token>\nROOT.gROOT.SetBatch()\nROOT.gROOT.ProcessLine('gErrorIgnoreLevel = kError;')\n<mask token>\ndirname = 'VLQToHiggsPairProd'\nvarial.settings.rootfile_postfixes = ['.png', '.pdf']\nvarial.settings.git_tag = varial.settings.readgittag('./GITTAGGER_LOG.txt')\ncurrent_tag = varial.settings.git_tag\nsmpls = list()\nsmpls.append(Sample(name='QCD', legend='QCD'))\nsmpls.append(Sample(name='TTJets', legend='TTJets'))\nsmpls.append(Sample(name='WJets', legend='WJets'))\nsmpls.append(Sample(name='ZJets', legend='ZJets'))\nanalysis.all_samples = dict((s.name, s) for s in smpls)\nvarial.settings.defaults_Legend['x_pos'] = 0.8\nvarial.settings.defaults_Legend['label_width'] = 0.36\nvarial.settings.defaults_Legend['label_height'] = 0.03\nvarial.settings.box_text_size = 0.03\nvarial.settings.colors = {'TTJets': 632, 'WJets': 878, 'ZJets': 596,\n 'TpTp_M1000': 870}\ncurrent_cuts = ['AfterPresel', 'FullSelection']\ncurrent_hists = ['/EventHists', '/MuonHists']\nuse_cuts = False\nuse_histos = False\nvarial.settings.stacking_order = ['ZJets', 'WJets', 'TTJets']\n\n\ndef select_histograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in\n current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n return use_this\n\n\ndef select_splithistograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in\n current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n return use_this\n\n\ndef norm_to_first_bin(wrp):\n histo = wrp.histo.Clone()\n firstbin = histo.GetBinContent(1)\n histo.Scale(1.0 / firstbin)\n info = wrp.all_info()\n info['lumi'] /= firstbin\n return varial.wrappers.HistoWrapper(histo, **info)\n\n\ndef norm_histos_to_first_bin(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield norm_to_first_bin(wrp)\n else:\n yield wrp\n\n\ndef norm_histos_to_integral(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield varial.operations.norm_to_integral(wrp)\n else:\n yield wrp\n\n\ndef label_axes(wrps):\n for w in wrps:\n if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':\n w.histo.GetXaxis().SetTitle(w.histo.GetTitle())\n w.histo.GetYaxis().SetTitle('events')\n w.histo.SetTitle('')\n yield w\n\n\ndef norm_cf_plots(wrps):\n for w in wrps:\n if w.name.startswith('cf_') and isinstance(w, varial.wrappers.\n HistoWrapper):\n yield varial.operations.norm_to_integral(w)\n else:\n yield w\n\n\ndef for_stacked_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(\n '.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.\n sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)\n wrps = label_axes(wrps)\n return wrps\n\n\ndef norm_cf_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = norm_histos_to_first_bin(wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\ndef do_nothing_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\ndef for_eff_plots_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: 
w.file_path.split(\n '.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (\n '100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda\n w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.\n sample else 1.0)\n wrps = gen.gen_make_eff_graphs(wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\ndef stack_histos_factory(**kws):\n kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = for_stacked_hook\n kws['plot_setup'] = gen.mc_stack_n_data_sum\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef norm_cf_factory(**kws):\n kws['hook_loaded_histos'] = norm_cf_hook\n kws['save_lin_log_scale'] = True\n kws['save_name_func'] = lambda w: w.name + '_norm'\n return varial.tools.Plotter(**kws)\n\n\ndef do_nothing_factory(**kws):\n kws['hook_loaded_histos'] = do_nothing_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef for_eff_factory(**kws):\n kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = for_eff_plots_hook\n kws['save_lin_log_scale'] = True\n return varial.tools.Plotter(**kws)\n\n\ndef create_name(name):\n return name + 'v' + varial.settings.git_tag\n\n\ntagger = varial.tools.GitTagger('./GITTAGGER_LOG.txt')\ntagger.run()\np1 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),\n filter_keyfunc=select_histograms, plotter_factory=stack_histos_factory,\n combine_files=True)\np2 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),\n filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.\n endswith('raw'), plotter_factory=norm_cf_factory, combine_files=True)\np3 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),\n filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.\n endswith('raw'), plotter_factory=do_nothing_factory, combine_files=True)\np4 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',\n pattern='v1.19_unmerged_files/*.root', filter_keyfunc=\n select_splithistograms, plotter_factory=for_eff_factory, combine_files=\n False)\np5 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',\n filter_keyfunc=select_splithistograms, plotter_factory=for_eff_factory,\n combine_files=True)\np6 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',\n pattern='v1.19_unmerged_files/*.root', filter_keyfunc=lambda w: w.name.\n startswith('cf_') and not w.name.endswith('raw'), plotter_factory=\n norm_cf_factory, combine_files=False)\np7 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',\n pattern='v1.19_unmerged_files/*.root', filter_keyfunc=lambda w: w.name.\n startswith('cf_') and not w.name.endswith('raw'), plotter_factory=\n do_nothing_factory, combine_files=False)\ntime.sleep(1)\np1.run()\np2.run()\np3.run()\np5.run()\nvarial.tools.WebCreator().run()\n",
"step-5": "#!/usr/bin/env python\n\nimport ROOT\nROOT.gROOT.SetBatch()\nROOT.gROOT.ProcessLine('gErrorIgnoreLevel = kError;')\n\nimport os\nimport time\nimport varial.tools\nimport varial.generators as gen\nimport itertools\nfrom varial.sample import Sample\nimport varial.analysis as analysis\n# import varial.toolinterface\n\ndirname = 'VLQToHiggsPairProd'\n\nvarial.settings.rootfile_postfixes = ['.png','.pdf']\n\nvarial.settings.git_tag = varial.settings.readgittag('./GITTAGGER_LOG.txt')\n\ncurrent_tag = varial.settings.git_tag\n\n# sample definitions\nsmpls = list()\n\n\nsmpls.append(Sample(\n name='QCD',\n legend='QCD'\n))\n\nsmpls.append(Sample(\n name='TTJets',\n legend='TTJets'\n))\n\nsmpls.append(Sample(\n name='WJets',\n legend='WJets'\n))\n\nsmpls.append(Sample(\n name='ZJets',\n legend='ZJets'\n))\n\nanalysis.all_samples = dict((s.name, s) for s in smpls)\n\nvarial.settings.defaults_Legend['x_pos'] = 0.80\nvarial.settings.defaults_Legend['label_width'] = 0.36\nvarial.settings.defaults_Legend['label_height'] = 0.03\n# varial.settings.debug_mode = True\nvarial.settings.box_text_size = 0.03\nvarial.settings.colors = {\n 'TTJets': 632, \n 'WJets': 878,\n 'ZJets': 596, \n 'TpTp_M1000': 870, \n # 'TpJ_TH_M800_NonTlep': 434,\n}\n\n# SELECT HISTOGRAMS TO PLOT HERE!\n\n# use these functions to specifically select histograms for plotting\ncurrent_cuts = ['AfterPresel', 'FullSelection'] # 'Nminus1-MuonPtCut', 'OneCut-HTCut', 'FullSelection', 'Nminus1-6OneHiggsTagCut'\ncurrent_hists = ['/EventHists', '/MuonHists'] # \"/ElectronHists\", '/MuonHists', '/JetHists', '/TopJetHists', '/EventHists', '/GenHists/w_decay_lin', '/GenHists/w_decay_log'\n\nuse_cuts = False\nuse_histos = False\n\nvarial.settings.stacking_order = ['ZJets', 'WJets', 'TTJets']\n\ndef select_histograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-'+c not in wrp.in_file_path for c in current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n # if ('GenHists' in wrp.in_file_path and not (wrp.name.startswith('mu_') or wrp.name.startswith('genjet_'))):\n # use_this = False\n # if 'GenHists' in wrp.in_file_path and ('NoCuts' not in wrp.in_file_path and 'Nminus1-BTagCut' not in wrp.in_file_path):\n # use_this = False\n return use_this\n\ndef select_splithistograms(wrp):\n use_this = True\n if use_cuts and all('NoGenSel-'+c not in wrp.in_file_path for c in current_cuts):\n use_this = False\n if wrp.name.startswith('cf_'):\n use_this = False\n if use_histos and all(c not in wrp.in_file_path for c in current_hists):\n use_this = False\n # if ('GenHists' in wrp.in_file_path and not (wrp.name.startswith('mu_') or wrp.name.startswith('genjet_'))):\n # use_this = False\n # if 'GenHists' in wrp.in_file_path and ('NoCuts' not in wrp.in_file_path and 'Nminus1-BTagCut' not in wrp.in_file_path):\n # use_this = False\n return use_this \n\n\n\n# SOME FUNCTIONS TO MANIPULATE HISTOGRAMS\n\ndef norm_to_first_bin(wrp):\n histo = wrp.histo.Clone()\n firstbin = histo.GetBinContent(1)\n histo.Scale(1. 
/ firstbin)\n info = wrp.all_info()\n info[\"lumi\"] /= firstbin\n return varial.wrappers.HistoWrapper(histo, **info)\n\ndef norm_histos_to_first_bin(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield norm_to_first_bin(wrp)\n else:\n yield wrp\n\ndef norm_histos_to_integral(wrps):\n for wrp in wrps:\n if isinstance(wrp, varial.wrappers.HistoWrapper):\n yield varial.operations.norm_to_integral(wrp)\n else:\n yield wrp\n\n\ndef label_axes(wrps):\n for w in wrps:\n if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':\n w.histo.GetXaxis().SetTitle(w.histo.GetTitle())\n w.histo.GetYaxis().SetTitle('events')\n w.histo.SetTitle('')\n yield w\n\ndef norm_cf_plots(wrps):\n for w in wrps:\n if w.name.startswith('cf_') and isinstance(w, varial.wrappers.HistoWrapper):\n yield varial.operations.norm_to_integral(w)\n else:\n yield w\n\n\n# HOOK FUNCTIONS FOR PLOTTER_FACTORIES; manipulate histograms here\n\ndef for_stacked_hook(wrps):\n # wrps = norm_cf_plots(wrps)\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(\n wrps,\n sample=lambda w: w.file_path.split('.')[-2],\n analyzer=lambda w: w.in_file_path[0],\n legend=lambda w: w.sample,\n is_signal=lambda w: 'TpTp_M' in w.sample,\n lumi=lambda w: 1.\n )\n # wrps = gen.imap_conditional(wrps, lambda w: 'TpJ_TH_M800' in w.sample, gen.op.norm_to_lumi)\n wrps = label_axes(wrps)\n return wrps\n\ndef norm_cf_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = norm_histos_to_first_bin(wrps)\n wrps = label_axes(wrps)\n return wrps\n\ndef do_nothing_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = label_axes(wrps)\n return wrps\n\ndef for_eff_plots_hook(wrps):\n wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)\n wrps = gen.gen_add_wrp_info(\n wrps,\n sample=lambda w: w.file_path.split('.')[-2],\n analyzer=lambda w: w.in_file_path[0],\n legend=lambda w: ('100* ' if 'TpTp_M' in w.sample else '') + w.sample,\n is_signal=lambda w: 'TpTp_M' in w.sample,\n lumi=lambda w: 0.01 if 'TpTp_M' in w.sample else 1.\n )\n wrps = gen.gen_make_eff_graphs(wrps)\n wrps = label_axes(wrps)\n return wrps\n\n\n# def calc_stack_order(wrps):\n# for w in wrps:\n\n\n# def stack_by_max(wrps):\n# wrps = calc_stack_order(wrps)\n# wrps = gen.mc_stack_n_data_sum(wrps)\n# return wrps\n\n\n# PLOTTER FACTORIES; select here in general which histograms to plot, how to manipulate them a.s.o.\n\ndef stack_histos_factory(**kws):\n kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = for_stacked_hook\n kws['plot_setup'] = gen.mc_stack_n_data_sum\n kws['save_lin_log_scale'] = True\n # kws['save_log_scale'] = True\n # kws['hook_canvas_pre_build'] = canvas_hook\n # kws['hook_canvas_post_build'] = canvas_hook\n return varial.tools.Plotter(**kws)\n\ndef norm_cf_factory(**kws):\n # kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = norm_cf_hook\n kws['save_lin_log_scale'] = True\n kws['save_name_func'] = lambda w : w.name + '_norm'\n # kws['save_log_scale'] = True\n # kws['hook_canvas_pre_build'] = canvas_hook\n # kws['hook_canvas_post_build'] = canvas_hook\n return varial.tools.Plotter(**kws)\n\ndef do_nothing_factory(**kws):\n # kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = do_nothing_hook\n kws['save_lin_log_scale'] = True\n # kws['save_log_scale'] = True\n # kws['hook_canvas_pre_build'] = canvas_hook\n # kws['hook_canvas_post_build'] = canvas_hook\n return 
varial.tools.Plotter(**kws)\n\ndef for_eff_factory(**kws):\n kws['filter_keyfunc'] = lambda w: 'TH1' in w.type\n kws['hook_loaded_histos'] = for_eff_plots_hook\n kws['save_lin_log_scale'] = True\n # kws['save_log_scale'] = True\n # kws['hook_canvas_pre_build'] = canvas_hook\n # kws['hook_canvas_post_build'] = canvas_hook\n return varial.tools.Plotter(**kws)\n\ndef create_name(name):\n return name+'v'+varial.settings.git_tag\n\n \n\ntagger = varial.tools.GitTagger('./GITTAGGER_LOG.txt')\n\ntagger.run()\n\n\n\np1 = varial.tools.mk_rootfile_plotter(\n name=create_name(dirname),\n # filter_keyfunc=lambda w: not w.name.startswith('cf_'),\n filter_keyfunc=select_histograms,\n plotter_factory=stack_histos_factory,\n combine_files=True\n)\n\np2 = varial.tools.mk_rootfile_plotter(\n name=create_name(dirname),\n filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),\n plotter_factory=norm_cf_factory,\n combine_files=True\n)\n\np3 = varial.tools.mk_rootfile_plotter(\n name=create_name(dirname),\n filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),\n plotter_factory=do_nothing_factory,\n combine_files=True\n)\n\np4 = varial.tools.mk_rootfile_plotter(\n name=create_name(dirname)+'split',\n pattern='v1.19_unmerged_files/*.root',\n filter_keyfunc=select_splithistograms,\n plotter_factory=for_eff_factory,\n combine_files=False\n)\n\np5 = varial.tools.mk_rootfile_plotter(\n name=create_name(dirname)+'split',\n # filter_keyfunc=lambda w: not w.name.startswith('cf_'),\n filter_keyfunc=select_splithistograms,\n plotter_factory=for_eff_factory,\n combine_files=True\n)\n\np6 = varial.tools.mk_rootfile_plotter(\n name=create_name(dirname)+'split',\n pattern='v1.19_unmerged_files/*.root',\n filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),\n plotter_factory=norm_cf_factory,\n combine_files=False\n)\n\np7 = varial.tools.mk_rootfile_plotter(\n name=create_name(dirname)+'split',\n pattern='v1.19_unmerged_files/*.root',\n filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),\n plotter_factory=do_nothing_factory,\n combine_files=False\n)\n\ntime.sleep(1)\np1.run()\np2.run()\np3.run()\n# p4.run()\np5.run()\n# p6.run()\n# p7.run()\nvarial.tools.WebCreator().run()\n# os.system('rm -r ~/www/TprimeAnalysis/%s' % create_name(dirname))\n# os.system('cp -r %s ~/www/TprimeAnalysis/' % create_name(dirname))\n",
"step-ids": [
10,
13,
14,
18,
20
]
}
|
[
10,
13,
14,
18,
20
] |
from search import SearchEngine
import tkinter as tk
if __name__ == "__main__":
ghettoGoogle = SearchEngine()
def searchButtonEvent():
search_query = searchQueryWidget.get()
search_results = ghettoGoogle.search(search_query)
resultsCanvas = tk.Tk()
if search_results == None:
tk.Label(resultsCanvas,text="No results",justify=tk.LEFT).pack(fill='both')
else:
searchTextBox = tk.Text(resultsCanvas,height=20,width=100)
searchTextBox.pack(side=tk.LEFT,fill=tk.Y)
scrollBar = tk.Scrollbar(resultsCanvas)
scrollBar.pack(side=tk.RIGHT,fill=tk.Y)
scrollBar.config(command=searchTextBox.yview)
searchTextBox.config(yscrollcommand=scrollBar.set)
searchTextBox.tag_config('Link',foreground='blue')
for i in range(len(search_results)):
searchTextBox.insert(tk.END,search_results[i][0]+"\n",'Link')
searchTextBox.insert(tk.END,search_results[i][1]+"\n\n")
canvas = tk.Tk()
tk.Label(canvas, text = "Enter search query").grid(row = 0)
searchQueryWidget = tk.Entry(canvas)
searchQueryWidget.grid(row=0,column=1)
tk.Button(canvas,text="Quit",command=canvas.quit).grid(row=1,column=0,sticky=tk.W)
tk.Button(canvas,text="Search",command=searchButtonEvent).grid(row=1,column=0,sticky=tk.W)
canvas.mainloop()
|
normal
|
{
"blob_id": "0cf90cd7704db9f7467e458b402fadb01c701148",
"index": 7307,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n ghettoGoogle = SearchEngine()\n\n def searchButtonEvent():\n search_query = searchQueryWidget.get()\n search_results = ghettoGoogle.search(search_query)\n resultsCanvas = tk.Tk()\n if search_results == None:\n tk.Label(resultsCanvas, text='No results', justify=tk.LEFT).pack(\n fill='both')\n else:\n searchTextBox = tk.Text(resultsCanvas, height=20, width=100)\n searchTextBox.pack(side=tk.LEFT, fill=tk.Y)\n scrollBar = tk.Scrollbar(resultsCanvas)\n scrollBar.pack(side=tk.RIGHT, fill=tk.Y)\n scrollBar.config(command=searchTextBox.yview)\n searchTextBox.config(yscrollcommand=scrollBar.set)\n searchTextBox.tag_config('Link', foreground='blue')\n for i in range(len(search_results)):\n searchTextBox.insert(tk.END, search_results[i][0] + '\\n',\n 'Link')\n searchTextBox.insert(tk.END, search_results[i][1] + '\\n\\n')\n canvas = tk.Tk()\n tk.Label(canvas, text='Enter search query').grid(row=0)\n searchQueryWidget = tk.Entry(canvas)\n searchQueryWidget.grid(row=0, column=1)\n tk.Button(canvas, text='Quit', command=canvas.quit).grid(row=1, column=\n 0, sticky=tk.W)\n tk.Button(canvas, text='Search', command=searchButtonEvent).grid(row=1,\n column=0, sticky=tk.W)\n canvas.mainloop()\n",
"step-3": "from search import SearchEngine\nimport tkinter as tk\nif __name__ == '__main__':\n ghettoGoogle = SearchEngine()\n\n def searchButtonEvent():\n search_query = searchQueryWidget.get()\n search_results = ghettoGoogle.search(search_query)\n resultsCanvas = tk.Tk()\n if search_results == None:\n tk.Label(resultsCanvas, text='No results', justify=tk.LEFT).pack(\n fill='both')\n else:\n searchTextBox = tk.Text(resultsCanvas, height=20, width=100)\n searchTextBox.pack(side=tk.LEFT, fill=tk.Y)\n scrollBar = tk.Scrollbar(resultsCanvas)\n scrollBar.pack(side=tk.RIGHT, fill=tk.Y)\n scrollBar.config(command=searchTextBox.yview)\n searchTextBox.config(yscrollcommand=scrollBar.set)\n searchTextBox.tag_config('Link', foreground='blue')\n for i in range(len(search_results)):\n searchTextBox.insert(tk.END, search_results[i][0] + '\\n',\n 'Link')\n searchTextBox.insert(tk.END, search_results[i][1] + '\\n\\n')\n canvas = tk.Tk()\n tk.Label(canvas, text='Enter search query').grid(row=0)\n searchQueryWidget = tk.Entry(canvas)\n searchQueryWidget.grid(row=0, column=1)\n tk.Button(canvas, text='Quit', command=canvas.quit).grid(row=1, column=\n 0, sticky=tk.W)\n tk.Button(canvas, text='Search', command=searchButtonEvent).grid(row=1,\n column=0, sticky=tk.W)\n canvas.mainloop()\n",
"step-4": "from search import SearchEngine\nimport tkinter as tk \n\nif __name__ == \"__main__\":\n ghettoGoogle = SearchEngine()\n\n def searchButtonEvent(): \n search_query = searchQueryWidget.get()\n search_results = ghettoGoogle.search(search_query)\n resultsCanvas = tk.Tk()\n if search_results == None: \n tk.Label(resultsCanvas,text=\"No results\",justify=tk.LEFT).pack(fill='both') \n else: \n searchTextBox = tk.Text(resultsCanvas,height=20,width=100)\n searchTextBox.pack(side=tk.LEFT,fill=tk.Y)\n scrollBar = tk.Scrollbar(resultsCanvas)\n scrollBar.pack(side=tk.RIGHT,fill=tk.Y)\n\n scrollBar.config(command=searchTextBox.yview)\n searchTextBox.config(yscrollcommand=scrollBar.set)\n\n searchTextBox.tag_config('Link',foreground='blue')\n for i in range(len(search_results)):\n searchTextBox.insert(tk.END,search_results[i][0]+\"\\n\",'Link')\n searchTextBox.insert(tk.END,search_results[i][1]+\"\\n\\n\")\n\n canvas = tk.Tk()\n tk.Label(canvas, text = \"Enter search query\").grid(row = 0)\n searchQueryWidget = tk.Entry(canvas)\n searchQueryWidget.grid(row=0,column=1)\n \n tk.Button(canvas,text=\"Quit\",command=canvas.quit).grid(row=1,column=0,sticky=tk.W)\n tk.Button(canvas,text=\"Search\",command=searchButtonEvent).grid(row=1,column=0,sticky=tk.W)\n canvas.mainloop() \n\n \n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from layers import TrueSkillFactorGraph
from math import e, sqrt
from numerics import atLeast, _Vector, _DiagonalMatrix, Matrix
from objects import SkillCalculator, SupportedOptions, argumentNotNone, \
getPartialPlayPercentage, sortByRank
class FactorGraphTrueSkillCalculator(SkillCalculator):
def __init__(self):
super(FactorGraphTrueSkillCalculator, self).__init__(SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE, atLeast(2), atLeast(1))
def calculateNewRatings(self, gameInfo, teams, teamRanks):
argumentNotNone(gameInfo, "gameInfo")
self._validateTeamCountAndPlayersCountPerTeam(teams)
teams, teamRanks = sortByRank(teams, teamRanks)
factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)
factorGraph.buildGraph()
factorGraph.runSchedule()
return factorGraph.getUpdatedRatings()
def calculateMatchQuality(self, gameInfo, teams):
skillsMatrix = self._getPlayerCovarianceMatrix(teams)
meanVector = self._getPlayerMeansVector(teams)
meanVectorTranspose = meanVector.transpose
playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(teams, meanVector.rows)
playerTeamAssignmentsMatrixTranspose = playerTeamAssignmentsMatrix.transpose
betaSquared = gameInfo.beta**2.0
start = meanVectorTranspose * playerTeamAssignmentsMatrix
aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose) * playerTeamAssignmentsMatrix
aTSA = playerTeamAssignmentsMatrixTranspose * skillsMatrix * playerTeamAssignmentsMatrix
middle = aTa + aTSA
middleInverse = middle.inverse
end = playerTeamAssignmentsMatrixTranspose * meanVector
expPartMatrix = (start * middleInverse * end) * -0.5
expPart = expPartMatrix.determinant
sqrtPartNumerator = aTa.determinant
sqrtPartDenominator = middle.determinant
sqrtPart = sqrtPartNumerator / sqrtPartDenominator
result = (e**expPart) * sqrt(sqrtPart)
return result
def _getPlayerMeansVector(self, teamAssignmentsList):
return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.mean))
def _getPlayerCovarianceMatrix(self, teamAssignmentsList):
return _DiagonalMatrix(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.standardDeviation**2.0))
def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):
playerRatingValues = list()
for currentTeam in teamAssigmentsList:
for currentRating in currentTeam.values:
playerRatingValues.append(playerRatingFunction(currentRating))
return playerRatingValues
def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList, totalPlayers):
playerAssignments = list()
totalPreviousPlayers = 0
for i in range(len(teamAssignmentsList)):
currentTeam = teamAssignmentsList[i]
currentRowValues = [0] * totalPreviousPlayers
playerAssignments.append(currentRowValues)
for currentRating in currentTeam:
currentRowValues.append(getPartialPlayPercentage(currentRating[0]))
totalPreviousPlayers += 1
nextTeam = teamAssignmentsList[i + 1]
for nextTeamPlayerPair in nextTeam:
currentRowValues.append(-1 * getPartialPlayPercentage(nextTeamPlayerPair[0]))
return Matrix(totalPlayers, len(teamAssignmentsList) - 1, playerAssignments)
|
normal
|
{
"blob_id": "009be282e45d191eb8f4d7d2986a2f182d64c1dd",
"index": 2935,
"step-1": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n <mask token>\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n <mask token>\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-3": "<mask token>\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n\n def calculateNewRatings(self, gameInfo, teams, teamRanks):\n argumentNotNone(gameInfo, 'gameInfo')\n self._validateTeamCountAndPlayersCountPerTeam(teams)\n teams, teamRanks = sortByRank(teams, teamRanks)\n factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n factorGraph.buildGraph()\n factorGraph.runSchedule()\n return factorGraph.getUpdatedRatings()\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-4": "from layers import TrueSkillFactorGraph\nfrom math import e, sqrt\nfrom numerics import atLeast, _Vector, _DiagonalMatrix, Matrix\nfrom objects import SkillCalculator, SupportedOptions, argumentNotNone, getPartialPlayPercentage, sortByRank\n\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\n def __init__(self):\n super(FactorGraphTrueSkillCalculator, self).__init__(\n SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE,\n atLeast(2), atLeast(1))\n\n def calculateNewRatings(self, gameInfo, teams, teamRanks):\n argumentNotNone(gameInfo, 'gameInfo')\n self._validateTeamCountAndPlayersCountPerTeam(teams)\n teams, teamRanks = sortByRank(teams, teamRanks)\n factorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n factorGraph.buildGraph()\n factorGraph.runSchedule()\n return factorGraph.getUpdatedRatings()\n\n def calculateMatchQuality(self, gameInfo, teams):\n skillsMatrix = self._getPlayerCovarianceMatrix(teams)\n meanVector = self._getPlayerMeansVector(teams)\n meanVectorTranspose = meanVector.transpose\n playerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(\n teams, meanVector.rows)\n playerTeamAssignmentsMatrixTranspose = (playerTeamAssignmentsMatrix\n .transpose)\n betaSquared = gameInfo.beta ** 2.0\n start = meanVectorTranspose * playerTeamAssignmentsMatrix\n aTa = (betaSquared * playerTeamAssignmentsMatrixTranspose *\n playerTeamAssignmentsMatrix)\n aTSA = (playerTeamAssignmentsMatrixTranspose * skillsMatrix *\n playerTeamAssignmentsMatrix)\n middle = aTa + aTSA\n middleInverse = middle.inverse\n end = playerTeamAssignmentsMatrixTranspose * meanVector\n expPartMatrix = start * middleInverse * end * -0.5\n expPart = expPartMatrix.determinant\n sqrtPartNumerator = aTa.determinant\n sqrtPartDenominator = middle.determinant\n sqrtPart = sqrtPartNumerator / sqrtPartDenominator\n result = e ** expPart * sqrt(sqrtPart)\n return result\n\n def _getPlayerMeansVector(self, teamAssignmentsList):\n return _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda\n rating: rating.mean))\n\n def _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n return _DiagonalMatrix(self._getPlayerRatingValues(\n teamAssignmentsList, lambda rating: rating.standardDeviation **\n 2.0))\n\n def _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n playerRatingValues = list()\n for currentTeam in teamAssigmentsList:\n for currentRating in currentTeam.values:\n playerRatingValues.append(playerRatingFunction(currentRating))\n return playerRatingValues\n\n def _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList,\n totalPlayers):\n playerAssignments = list()\n totalPreviousPlayers = 0\n for i in range(len(teamAssignmentsList)):\n currentTeam = teamAssignmentsList[i]\n currentRowValues = [0] * totalPreviousPlayers\n playerAssignments.append(currentRowValues)\n for currentRating in currentTeam:\n currentRowValues.append(getPartialPlayPercentage(\n currentRating[0]))\n totalPreviousPlayers += 1\n nextTeam = teamAssignmentsList[i + 1]\n for nextTeamPlayerPair in nextTeam:\n currentRowValues.append(-1 * getPartialPlayPercentage(\n nextTeamPlayerPair[0]))\n return Matrix(totalPlayers, len(teamAssignmentsList) - 1,\n playerAssignments)\n",
"step-5": "from layers import TrueSkillFactorGraph\nfrom math import e, sqrt\nfrom numerics import atLeast, _Vector, _DiagonalMatrix, Matrix\nfrom objects import SkillCalculator, SupportedOptions, argumentNotNone, \\\n\tgetPartialPlayPercentage, sortByRank\n\nclass FactorGraphTrueSkillCalculator(SkillCalculator):\n\tdef __init__(self):\n\t\tsuper(FactorGraphTrueSkillCalculator, self).__init__(SupportedOptions.PARTIAL_PLAY | SupportedOptions.PARTIAL_UPDATE, atLeast(2), atLeast(1))\n\t\n\tdef calculateNewRatings(self, gameInfo, teams, teamRanks):\n\t\targumentNotNone(gameInfo, \"gameInfo\")\n\t\tself._validateTeamCountAndPlayersCountPerTeam(teams)\n\t\tteams, teamRanks = sortByRank(teams, teamRanks)\n\t\t\n\t\tfactorGraph = TrueSkillFactorGraph(gameInfo, teams, teamRanks)\n\t\tfactorGraph.buildGraph()\n\t\tfactorGraph.runSchedule()\t\n\t\t\n\t\treturn factorGraph.getUpdatedRatings()\n\t\t\n\tdef calculateMatchQuality(self, gameInfo, teams):\n\t\tskillsMatrix = self._getPlayerCovarianceMatrix(teams)\n\t\tmeanVector = self._getPlayerMeansVector(teams)\n\t\tmeanVectorTranspose = meanVector.transpose\n\t\t\n\t\tplayerTeamAssignmentsMatrix = self._createPlayerTeamAssignmentMatrix(teams, meanVector.rows)\n\t\tplayerTeamAssignmentsMatrixTranspose = playerTeamAssignmentsMatrix.transpose\n\t\t\n\t\tbetaSquared = gameInfo.beta**2.0\n\t\t\n\t\tstart = meanVectorTranspose * playerTeamAssignmentsMatrix\n\t\taTa = (betaSquared * playerTeamAssignmentsMatrixTranspose) * playerTeamAssignmentsMatrix\n\t\taTSA = playerTeamAssignmentsMatrixTranspose * skillsMatrix * playerTeamAssignmentsMatrix\n\t\tmiddle = aTa + aTSA\n\t\t\n\t\tmiddleInverse = middle.inverse\n\t\t\n\t\tend = playerTeamAssignmentsMatrixTranspose * meanVector\n\t\t\n\t\texpPartMatrix = (start * middleInverse * end) * -0.5\n\t\texpPart = expPartMatrix.determinant\n\t\t\n\t\tsqrtPartNumerator = aTa.determinant\n\t\tsqrtPartDenominator = middle.determinant\n\t\tsqrtPart = sqrtPartNumerator / sqrtPartDenominator\n\t\t\n\t\tresult = (e**expPart) * sqrt(sqrtPart)\n\t\t\n\t\treturn result\n\t\t\n\tdef _getPlayerMeansVector(self, teamAssignmentsList):\n\t\treturn _Vector(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.mean))\n\t\t\n\tdef _getPlayerCovarianceMatrix(self, teamAssignmentsList):\n\t\treturn _DiagonalMatrix(self._getPlayerRatingValues(teamAssignmentsList, lambda rating: rating.standardDeviation**2.0))\n\t\t\n\tdef _getPlayerRatingValues(self, teamAssigmentsList, playerRatingFunction):\n\t\tplayerRatingValues = list()\n\t\tfor currentTeam in teamAssigmentsList:\n\t\t\tfor currentRating in currentTeam.values:\n\t\t\t\tplayerRatingValues.append(playerRatingFunction(currentRating))\n\t\treturn playerRatingValues\n\t\n\tdef _createPlayerTeamAssignmentMatrix(self, teamAssignmentsList, totalPlayers):\n\t\tplayerAssignments = list()\n\t\ttotalPreviousPlayers = 0\n\t\t\n\t\tfor i in range(len(teamAssignmentsList)):\n\t\t\tcurrentTeam = teamAssignmentsList[i]\n\t\t\tcurrentRowValues = [0] * totalPreviousPlayers\n\t\t\tplayerAssignments.append(currentRowValues)\n\t\t\t\n\t\t\tfor currentRating in currentTeam:\n\t\t\t\tcurrentRowValues.append(getPartialPlayPercentage(currentRating[0]))\n\t\t\t\ttotalPreviousPlayers += 1\n\t\t\t\t\n\t\t\tnextTeam = teamAssignmentsList[i + 1]\n\t\t\tfor nextTeamPlayerPair in nextTeam:\n\t\t\t\tcurrentRowValues.append(-1 * getPartialPlayPercentage(nextTeamPlayerPair[0]))\n\t\t\t\t\n\t\treturn Matrix(totalPlayers, len(teamAssignmentsList) - 1, playerAssignments)\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
#!/usr/bin/env python
import rospy
import rosnode
import csv
import datetime
import rosbag
import sys
import os
import matplotlib.pyplot as plt
import argparse
import math
from math import hypot
import numpy as np
from sensor_msgs.msg import LaserScan
from std_msgs.msg import String
import yaml as yaml
start_time = None
value_dict = {}
combine = False
#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario1/fahrt3.bag'
#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario2/fahrt1.bag'
#bag_dir = '/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/reference_bag.bag'
'''
"rosservice call /change_material \"{name: \"Gazebo/Grey\", reflectivity: 0.2, transmission:\
\ 0.0, absorption: 0.1, angular_factor: 0.3}\""
'''
def compute_std(mean, liste):
temp = []
for item in liste:
temp.append((mean - item)**2)
nm = sum(temp)/ float(len(temp))
return math.sqrt(nm)
def load_file(filePath,file_name):
dict_ = {}
rospy.loginfo("Loading: %s",filePath+"/"+file_name)
try:
rospy.loginfo("Loading: %s",file_name)
file = open(filePath+file_name,'r')
dict_ = yaml.load(file)
except yaml.YAMLError as exc:
print(exc)
rospy.logerr('Failed to load: %s From: %s',file_name,filePath)
file.close()
return dict_
def get_params(temp):
p = {}
#temp = temp.split("{")[1]
temp = temp.split(",")
temp2 = temp[1].split(":")[1]
p['reflectivity']=float(temp2.replace(" ", "").replace("\\",""))
temp2 = temp[2].split(":")[1]
temp2 = temp2.replace("\\","").replace(" ","")
p['transmission'] = float(temp2)
temp2 = temp[3].split(":")[1]
temp2 = temp2.replace("\\","").replace(" ","")
p['absorption'] = float(temp2)
temp2 = temp[4].split(":")[1]
temp2 = temp2.replace("\\","").replace(" ","")
temp2 = temp2.replace("}","").replace("\"","")
p['angular_factor'] = float(temp2)
return p
def init():
rospy.init_node("monitoring_bag_topic_extract")
def get_bag_data():
path = "/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/"
ref = "reference_angels.yaml"
ref_dict = load_file(path,ref)
angels = ref_dict['angels2']
indexes = ref_dict['index']
ranges = ref_dict['ranges']
for f in os.listdir(path):
if rospy.is_shutdown():
break
if f.startswith("bag") and f.endswith(".bag"):
print "Loading Bag: "+path+f
bag = rosbag.Bag(path+f)
params = {}
scans = []
for topic, msg, t in bag.read_messages():
if topic == "/material_laser_scan":
scans.append(msg.ranges)
if topic == "/info_vals" and not params:
params = get_params(msg.data.split("{")[1])
# compute mean_err, std_dev, data_loss per value
scan_info = {}
for scan in scans:
for idx, val in enumerate(scan):
if idx in indexes:
#all val should be on the plate
i = indexes.index(idx)
if idx not in scan_info.keys():
#print str(val)
scan_info[idx] = [0,0,0.0,[],0.0,0.0]
scan_info[idx][4] = round(ranges[i], 5)
scan_info[idx][5] = angels[i]
if val <= 0.8:
scan_info[idx][1] +=1
scan_info[idx][2] +=val
scan_info[idx][3].append(val)
else:
scan_info[idx][0] +=1
final_d = {}
final_d["params"] = params
for key in scan_info.keys():
final_d[key] = {}
final_d[key]['ref_range'] = scan_info[key][4]
final_d[key]['angle'] = scan_info[key][5]
if scan_info[key][3]:
#if there is at least one element
mean = scan_info[key][2] / scan_info[key][1]
final_d[key]['mean_range'] = mean
std = compute_std(mean, scan_info[key][3])
final_d[key]['stdev'] = std
final_d[key]['loss'] = float(scan_info[key][0])/float((scan_info[key][1]+scan_info[key][0]))
else:
final_d[key]['mean_range'] = 0.0
final_d[key]['stdev'] = 0.0
final_d[key]['loss'] = 1.0
f1 = yaml.dump(final_d, default_flow_style=False)
try:
                out_file = open('/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/yaml/' + f + '.yaml', 'w')
                out_file.write(f1)
                out_file.close()
except Exception as inst:
rospy.loginfo('%s',str(inst))
if __name__ == '__main__':
init()
get_bag_data()
|
normal
|
{
"blob_id": "c00a8bfec46ed829e413257bf97c44add564080d",
"index": 8349,
"step-1": "#!/usr/bin/env python\nimport rospy\nimport rosnode\nimport csv\nimport datetime\nimport rosbag\nimport sys\nimport os\nimport matplotlib.pyplot as plt\nimport argparse\nimport math\nfrom math import hypot\nimport numpy as np\nfrom sensor_msgs.msg import LaserScan\nfrom std_msgs.msg import String\nimport yaml as yaml\nstart_time = None\nvalue_dict = {}\ncombine = False\n#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario1/fahrt3.bag'\n#bag_dir = '/home/michael/youbot_local_dev/youbot_rosbag_20180828_szenario2/fahrt1.bag'\n#bag_dir = '/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/reference_bag.bag'\n'''\n\"rosservice call /change_material \\\"{name: \\\"Gazebo/Grey\\\", reflectivity: 0.2, transmission:\\\n \\ 0.0, absorption: 0.1, angular_factor: 0.3}\\\"\"\n'''\n\ndef compute_std(mean, liste):\n temp = []\n for item in liste:\n temp.append((mean - item)**2)\n nm = sum(temp)/ float(len(temp))\n return math.sqrt(nm)\n\ndef load_file(filePath,file_name):\n dict_ = {}\n rospy.loginfo(\"Loading: %s\",filePath+\"/\"+file_name)\n try:\n rospy.loginfo(\"Loading: %s\",file_name)\n file = open(filePath+file_name,'r')\n dict_ = yaml.load(file)\n except yaml.YAMLError as exc:\n print(exc)\n rospy.logerr('Failed to load: %s From: %s',file_name,filePath)\n file.close()\n return dict_\n\ndef get_params(temp):\n p = {}\n #temp = temp.split(\"{\")[1]\n temp = temp.split(\",\")\n temp2 = temp[1].split(\":\")[1]\n p['reflectivity']=float(temp2.replace(\" \", \"\").replace(\"\\\\\",\"\"))\n temp2 = temp[2].split(\":\")[1]\n temp2 = temp2.replace(\"\\\\\",\"\").replace(\" \",\"\")\n p['transmission'] = float(temp2)\n temp2 = temp[3].split(\":\")[1]\n temp2 = temp2.replace(\"\\\\\",\"\").replace(\" \",\"\")\n p['absorption'] = float(temp2)\n temp2 = temp[4].split(\":\")[1]\n temp2 = temp2.replace(\"\\\\\",\"\").replace(\" \",\"\")\n temp2 = temp2.replace(\"}\",\"\").replace(\"\\\"\",\"\")\n p['angular_factor'] = float(temp2)\n return p\n\ndef init():\n rospy.init_node(\"monitoring_bag_topic_extract\")\n\ndef get_bag_data():\n path = \"/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/\"\n ref = \"reference_angels.yaml\"\n ref_dict = load_file(path,ref)\n angels = ref_dict['angels2']\n indexes = ref_dict['index']\n ranges = ref_dict['ranges']\n for f in os.listdir(path):\n if rospy.is_shutdown():\n break\n if f.startswith(\"bag\") and f.endswith(\".bag\"):\n print \"Loading Bag: \"+path+f\n bag = rosbag.Bag(path+f)\n params = {}\n scans = []\n for topic, msg, t in bag.read_messages():\n if topic == \"/material_laser_scan\":\n scans.append(msg.ranges)\n if topic == \"/info_vals\" and not params:\n params = get_params(msg.data.split(\"{\")[1])\n # compute mean_err, std_dev, data_loss per value\n scan_info = {}\n for scan in scans:\n for idx, val in enumerate(scan):\n if idx in indexes:\n #all val should be on the plate\n i = indexes.index(idx)\n if idx not in scan_info.keys():\n #print str(val)\n scan_info[idx] = [0,0,0.0,[],0.0,0.0]\n scan_info[idx][4] = round(ranges[i], 5)\n scan_info[idx][5] = angels[i]\n if val <= 0.8:\n scan_info[idx][1] +=1\n scan_info[idx][2] +=val\n scan_info[idx][3].append(val)\n else:\n scan_info[idx][0] +=1\n final_d = {}\n final_d[\"params\"] = params\n for key in scan_info.keys():\n final_d[key] = {}\n final_d[key]['ref_range'] = scan_info[key][4]\n final_d[key]['angle'] = scan_info[key][5]\n if scan_info[key][3]:\n #if there is at least one element\n mean = scan_info[key][2] / scan_info[key][1]\n\n final_d[key]['mean_range'] = 
mean\n std = compute_std(mean, scan_info[key][3])\n\n final_d[key]['stdev'] = std\n final_d[key]['loss'] = float(scan_info[key][0])/float((scan_info[key][1]+scan_info[key][0]))\n else:\n final_d[key]['mean_range'] = 0.0\n final_d[key]['stdev'] = 0.0\n final_d[key]['loss'] = 1.0\n\n f1 = yaml.dump(final_d, default_flow_style=False)\n try:\n f = open('/home/alex/wentz_catkin_ws/src/automatic_simulation/tests/yaml/'+f+'.yaml','w') \n f.write(f1)\n f.close()\n except Exception as inst:\n rospy.loginfo('%s',str(inst))\n\n\nif __name__ == '__main__':\n init()\n get_bag_data()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fight_use(user, reply, room):
return 200
<|reserved_special_token_1|>
name = 'Ледяная скорбь'
description = (
'Тот кто держит этот клинок, должен обладать бесконечной силой. Подобно тому, как он разрывает плоть, он разрывает души.'
)
price = 3000
fightable = True
def fight_use(user, reply, room):
return 200
<|reserved_special_token_1|>
name = 'Ледяная скорбь'
description = 'Тот кто держит этот клинок, должен обладать бесконечной силой. Подобно тому, как он разрывает плоть, он разрывает души.'
price = 3000
fightable = True
def fight_use(user, reply, room):
return 200
|
flexible
|
{
"blob_id": "7254e74ff3f562613cc610e4816a2d92b6b1cd4c",
"index": 6074,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fight_use(user, reply, room):\n return 200\n",
"step-3": "name = 'Ледяная скорбь'\ndescription = (\n 'Тот кто держит этот клинок, должен обладать бесконечной силой. Подобно тому, как он разрывает плоть, он разрывает души.'\n )\nprice = 3000\nfightable = True\n\n\ndef fight_use(user, reply, room):\n return 200\n",
"step-4": "name = 'Ледяная скорбь'\ndescription = 'Тот кто держит этот клинок, должен обладать бесконечной силой. Подобно тому, как он разрывает плоть, он разрывает души.'\nprice = 3000\n\nfightable = True\n\ndef fight_use(user, reply, room):\n\treturn 200",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
array = [1, 7, 3, 8, 9, 2, 4]
index = 0
while (index < len(array)):
count = 0
while(count <= len(array)-2):
if(count == len(array)-1):
break
if (array[count] > array[count+1]):
sift = array[count]
array[count] = array[count+1]
array[count+1] = sift
count = count + 1
index = index + 1
print (array)
|
normal
|
{
"blob_id": "fc8976141a19afd099f92cbbdb578e9c620cb745",
"index": 5075,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile index < len(array):\n count = 0\n while count <= len(array) - 2:\n if count == len(array) - 1:\n break\n if array[count] > array[count + 1]:\n sift = array[count]\n array[count] = array[count + 1]\n array[count + 1] = sift\n count = count + 1\n index = index + 1\nprint(array)\n",
"step-3": "array = [1, 7, 3, 8, 9, 2, 4]\nindex = 0\nwhile index < len(array):\n count = 0\n while count <= len(array) - 2:\n if count == len(array) - 1:\n break\n if array[count] > array[count + 1]:\n sift = array[count]\n array[count] = array[count + 1]\n array[count + 1] = sift\n count = count + 1\n index = index + 1\nprint(array)\n",
"step-4": "array = [1, 7, 3, 8, 9, 2, 4]\nindex = 0\nwhile (index < len(array)):\n count = 0\n while(count <= len(array)-2):\n if(count == len(array)-1):\n break\n if (array[count] > array[count+1]):\n sift = array[count]\n array[count] = array[count+1]\n array[count+1] = sift\n count = count + 1\n index = index + 1\nprint (array)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
<|reserved_special_token_0|>
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
<|reserved_special_token_0|>
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
""" Fixture that returns which jinja template used """
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
<|reserved_special_token_0|>
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
""" Fixture that returns which jinja template used """
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
releases = ['dr15', 'dr16']
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
<|reserved_special_token_1|>
from __future__ import print_function, division, absolute_import
import pytest
import os
from flask import template_rendered
from flipper.app import create_app
from contextlib import contextmanager
@contextmanager
def captured_templates(app):
""" Records which templates are used """
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
""" Fixture that returns which jinja template used """
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
""" Flask application """
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
""" Fixture to create an app with a test Flask base url
Returns the client fixture
"""
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
releases = ['dr15', 'dr16']
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
""" Fixture to monkeypatch the flipper release environment variable """
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
<|reserved_special_token_1|>
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2018-08-16 11:43:42
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-08-16 11:58:06
from __future__ import print_function, division, absolute_import
import pytest
import os
from flask import template_rendered
from flipper.app import create_app
from contextlib import contextmanager
@contextmanager
def captured_templates(app):
''' Records which templates are used '''
recorded = []
def record(app, template, context, **extra):
recorded.append((template, context))
template_rendered.connect(record)
yield recorded
template_rendered.disconnect(record)
@pytest.fixture()
def get_templates(app):
''' Fixture that returns which jinja template used '''
with captured_templates(app) as templates:
yield templates
@pytest.fixture
def app():
''' Flask application '''
app = create_app()
return app
@pytest.yield_fixture
def testctx(monkeypatch):
''' Fixture to create an app with a test Flask base url
Returns only the request context to allow for use for url_for
'''
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
ctx = app.test_request_context()
ctx.push()
yield
ctx.pop()
@pytest.yield_fixture
def testclient(monkeypatch):
''' Fixture to create an app with a test Flask base url
Returns the client fixture
'''
monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')
app = create_app()
app.testing = True
with app.test_client() as client:
yield client
# global releases to loop over
releases = ['dr15', 'dr16']
@pytest.fixture(params=releases)
def monkeyrelease(monkeypatch, request):
''' Fixture to monkeypatch the flipper release environment variable '''
monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)
yield request.param
|
flexible
|
{
"blob_id": "bd00644b9cf019fe8c86d52494389b7f0f03d3c3",
"index": 1276,
"step-1": "<mask token>\n\n\n@contextmanager\ndef captured_templates(app):\n \"\"\" Records which templates are used \"\"\"\n recorded = []\n\n def record(app, template, context, **extra):\n recorded.append((template, context))\n template_rendered.connect(record)\n yield recorded\n template_rendered.disconnect(record)\n\n\n<mask token>\n\n\[email protected]\ndef app():\n \"\"\" Flask application \"\"\"\n app = create_app()\n return app\n\n\[email protected]_fixture\ndef testctx(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns only the request context to allow for use for url_for\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n ctx = app.test_request_context()\n ctx.push()\n yield\n ctx.pop()\n\n\[email protected]_fixture\ndef testclient(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns the client fixture\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n with app.test_client() as client:\n yield client\n\n\n<mask token>\n\n\[email protected](params=releases)\ndef monkeyrelease(monkeypatch, request):\n \"\"\" Fixture to monkeypatch the flipper release environment variable \"\"\"\n monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)\n yield request.param\n",
"step-2": "<mask token>\n\n\n@contextmanager\ndef captured_templates(app):\n \"\"\" Records which templates are used \"\"\"\n recorded = []\n\n def record(app, template, context, **extra):\n recorded.append((template, context))\n template_rendered.connect(record)\n yield recorded\n template_rendered.disconnect(record)\n\n\[email protected]()\ndef get_templates(app):\n \"\"\" Fixture that returns which jinja template used \"\"\"\n with captured_templates(app) as templates:\n yield templates\n\n\[email protected]\ndef app():\n \"\"\" Flask application \"\"\"\n app = create_app()\n return app\n\n\[email protected]_fixture\ndef testctx(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns only the request context to allow for use for url_for\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n ctx = app.test_request_context()\n ctx.push()\n yield\n ctx.pop()\n\n\[email protected]_fixture\ndef testclient(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns the client fixture\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n with app.test_client() as client:\n yield client\n\n\n<mask token>\n\n\[email protected](params=releases)\ndef monkeyrelease(monkeypatch, request):\n \"\"\" Fixture to monkeypatch the flipper release environment variable \"\"\"\n monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)\n yield request.param\n",
"step-3": "<mask token>\n\n\n@contextmanager\ndef captured_templates(app):\n \"\"\" Records which templates are used \"\"\"\n recorded = []\n\n def record(app, template, context, **extra):\n recorded.append((template, context))\n template_rendered.connect(record)\n yield recorded\n template_rendered.disconnect(record)\n\n\[email protected]()\ndef get_templates(app):\n \"\"\" Fixture that returns which jinja template used \"\"\"\n with captured_templates(app) as templates:\n yield templates\n\n\[email protected]\ndef app():\n \"\"\" Flask application \"\"\"\n app = create_app()\n return app\n\n\[email protected]_fixture\ndef testctx(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns only the request context to allow for use for url_for\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n ctx = app.test_request_context()\n ctx.push()\n yield\n ctx.pop()\n\n\[email protected]_fixture\ndef testclient(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns the client fixture\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n with app.test_client() as client:\n yield client\n\n\nreleases = ['dr15', 'dr16']\n\n\[email protected](params=releases)\ndef monkeyrelease(monkeypatch, request):\n \"\"\" Fixture to monkeypatch the flipper release environment variable \"\"\"\n monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)\n yield request.param\n",
"step-4": "from __future__ import print_function, division, absolute_import\nimport pytest\nimport os\nfrom flask import template_rendered\nfrom flipper.app import create_app\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef captured_templates(app):\n \"\"\" Records which templates are used \"\"\"\n recorded = []\n\n def record(app, template, context, **extra):\n recorded.append((template, context))\n template_rendered.connect(record)\n yield recorded\n template_rendered.disconnect(record)\n\n\[email protected]()\ndef get_templates(app):\n \"\"\" Fixture that returns which jinja template used \"\"\"\n with captured_templates(app) as templates:\n yield templates\n\n\[email protected]\ndef app():\n \"\"\" Flask application \"\"\"\n app = create_app()\n return app\n\n\[email protected]_fixture\ndef testctx(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns only the request context to allow for use for url_for\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n ctx = app.test_request_context()\n ctx.push()\n yield\n ctx.pop()\n\n\[email protected]_fixture\ndef testclient(monkeypatch):\n \"\"\" Fixture to create an app with a test Flask base url\n\n Returns the client fixture\n\n \"\"\"\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n with app.test_client() as client:\n yield client\n\n\nreleases = ['dr15', 'dr16']\n\n\[email protected](params=releases)\ndef monkeyrelease(monkeypatch, request):\n \"\"\" Fixture to monkeypatch the flipper release environment variable \"\"\"\n monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)\n yield request.param\n",
"step-5": "# !usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Licensed under a 3-clause BSD license.\n#\n# @Author: Brian Cherinka\n# @Date: 2018-08-16 11:43:42\n# @Last modified by: Brian Cherinka\n# @Last Modified time: 2018-08-16 11:58:06\n\nfrom __future__ import print_function, division, absolute_import\nimport pytest\nimport os\nfrom flask import template_rendered\nfrom flipper.app import create_app\nfrom contextlib import contextmanager\n\n\n@contextmanager\ndef captured_templates(app):\n ''' Records which templates are used '''\n recorded = []\n\n def record(app, template, context, **extra):\n recorded.append((template, context))\n\n template_rendered.connect(record)\n yield recorded\n template_rendered.disconnect(record)\n\n\[email protected]()\ndef get_templates(app):\n ''' Fixture that returns which jinja template used '''\n with captured_templates(app) as templates:\n yield templates\n\n\[email protected]\ndef app():\n ''' Flask application '''\n app = create_app()\n return app\n\n\[email protected]_fixture\ndef testctx(monkeypatch):\n ''' Fixture to create an app with a test Flask base url\n\n Returns only the request context to allow for use for url_for\n\n '''\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n ctx = app.test_request_context()\n ctx.push()\n yield\n ctx.pop()\n\n\[email protected]_fixture\ndef testclient(monkeypatch):\n ''' Fixture to create an app with a test Flask base url\n\n Returns the client fixture\n\n '''\n monkeypatch.setenv('FLIPPER_BASE', 'test/flipper')\n app = create_app()\n app.testing = True\n with app.test_client() as client:\n yield client\n\n\n# global releases to loop over\nreleases = ['dr15', 'dr16']\n\n\[email protected](params=releases)\ndef monkeyrelease(monkeypatch, request):\n ''' Fixture to monkeypatch the flipper release environment variable '''\n monkeypatch.setitem(os.environ, 'FLIPPER_RELEASE', request.param)\n yield request.param\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
def edit_user_stats(server_id: str, user_id: str, stat: str, datas):
create_user(server_id, user_id)
if os.path.isfile('Server/{}/user.json'.format(server_id)):
with open('Server/{}/user.json'.format(server_id), 'r') as fp:
data = json.load(fp)
data[user_id][stat] = datas
with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:
json.dump(data, fp, indent=4)
def set_message(server_id: str, name: str, message_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['message'] = message_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_log(server_id: str, name: str, channel_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['log'] = channel_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
<|reserved_special_token_0|>
def set_count(server_id: str, name: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
count = data[name]['ticket']
data[name]['ticket'] = count + 1
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def edit_setting(server_id: str, vari: str, new):
create_server(server_id)
with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if vari in data:
data[vari] = new
with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def edit_user_stats(server_id: str, user_id: str, stat: str, datas):
create_user(server_id, user_id)
if os.path.isfile('Server/{}/user.json'.format(server_id)):
with open('Server/{}/user.json'.format(server_id), 'r') as fp:
data = json.load(fp)
data[user_id][stat] = datas
with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:
json.dump(data, fp, indent=4)
def set_message(server_id: str, name: str, message_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['message'] = message_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_log(server_id: str, name: str, channel_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['log'] = channel_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_category(server_id: str, name: str, category_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['category'] = category_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_count(server_id: str, name: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
count = data[name]['ticket']
data[name]['ticket'] = count + 1
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def edit_setting(server_id: str, vari: str, new):
create_server(server_id)
with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if vari in data:
data[vari] = new
with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.chdir('/home/niko/data/Marvin')
def edit_user_stats(server_id: str, user_id: str, stat: str, datas):
create_user(server_id, user_id)
if os.path.isfile('Server/{}/user.json'.format(server_id)):
with open('Server/{}/user.json'.format(server_id), 'r') as fp:
data = json.load(fp)
data[user_id][stat] = datas
with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:
json.dump(data, fp, indent=4)
def set_message(server_id: str, name: str, message_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['message'] = message_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_log(server_id: str, name: str, channel_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['log'] = channel_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_category(server_id: str, name: str, category_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['category'] = category_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_count(server_id: str, name: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
count = data[name]['ticket']
data[name]['ticket'] = count + 1
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def edit_setting(server_id: str, vari: str, new):
create_server(server_id)
with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if vari in data:
data[vari] = new
with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
<|reserved_special_token_1|>
import json
import os
from lib.create import create_server, create_user
os.chdir('/home/niko/data/Marvin')
def edit_user_stats(server_id: str, user_id: str, stat: str, datas):
create_user(server_id, user_id)
if os.path.isfile('Server/{}/user.json'.format(server_id)):
with open('Server/{}/user.json'.format(server_id), 'r') as fp:
data = json.load(fp)
data[user_id][stat] = datas
with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:
json.dump(data, fp, indent=4)
def set_message(server_id: str, name: str, message_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['message'] = message_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_log(server_id: str, name: str, channel_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['log'] = channel_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_category(server_id: str, name: str, category_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
data[name]['category'] = category_id
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_count(server_id: str, name: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if name in data:
count = data[name]['ticket']
data[name]['ticket'] = count + 1
with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
def edit_setting(server_id: str, vari: str, new):
create_server(server_id)
with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'
) as fp:
data = json.load(fp)
if vari in data:
data[vari] = new
with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:
json.dump(data, fp, indent=4)
else:
return False
<|reserved_special_token_1|>
import json
import os
from lib.create import create_server, create_user
os.chdir(r'/home/niko/data/Marvin')
def edit_user_stats(server_id: str, user_id: str, stat: str, datas):
create_user(server_id, user_id)
if os.path.isfile("Server/{}/user.json".format(server_id)):
with open("Server/{}/user.json".format(server_id), 'r') as fp:
data = json.load(fp)
data[user_id][stat] = datas
with open("Server/{}/user.json".format(server_id, user_id), 'w') as fp:
json.dump(data, fp, indent=4)
def set_message(server_id: str, name: str, message_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
data = json.load(fp)
if name in data:
data[name]['message'] = message_id
with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_log(server_id: str, name: str, channel_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
data = json.load(fp)
if name in data:
data[name]['log'] = channel_id
with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_category(server_id: str, name: str, category_id: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
data = json.load(fp)
if name in data:
data[name]['category'] = category_id
with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
json.dump(data, fp, indent=4)
else:
return False
def set_count(server_id: str, name: str):
create_server(server_id)
with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:
data = json.load(fp)
if name in data:
count = data[name]['ticket']
data[name]['ticket'] = count + 1
with open('Server/{}/ticket.json'.format(server_id), "w+") as fp:
json.dump(data, fp, indent=4)
else:
return False
def edit_setting(server_id: str, vari: str, new):
create_server(server_id)
with open('Server/{}/settings.json'.format(server_id), encoding='utf-8') as fp:
data = json.load(fp)
if vari in data:
data[vari] = new
with open('Server/{}/settings.json'.format(server_id), "w+") as fp:
json.dump(data, fp, indent=4)
else:
return False
|
flexible
|
{
"blob_id": "e6d506dd45e72ee7f0162a884981ee1156153d3d",
"index": 8661,
"step-1": "<mask token>\n\n\ndef edit_user_stats(server_id: str, user_id: str, stat: str, datas):\n create_user(server_id, user_id)\n if os.path.isfile('Server/{}/user.json'.format(server_id)):\n with open('Server/{}/user.json'.format(server_id), 'r') as fp:\n data = json.load(fp)\n data[user_id][stat] = datas\n with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\ndef set_message(server_id: str, name: str, message_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['message'] = message_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_log(server_id: str, name: str, channel_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['log'] = channel_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\n<mask token>\n\n\ndef set_count(server_id: str, name: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n count = data[name]['ticket']\n data[name]['ticket'] = count + 1\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef edit_setting(server_id: str, vari: str, new):\n create_server(server_id)\n with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if vari in data:\n data[vari] = new\n with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n",
"step-2": "<mask token>\n\n\ndef edit_user_stats(server_id: str, user_id: str, stat: str, datas):\n create_user(server_id, user_id)\n if os.path.isfile('Server/{}/user.json'.format(server_id)):\n with open('Server/{}/user.json'.format(server_id), 'r') as fp:\n data = json.load(fp)\n data[user_id][stat] = datas\n with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\ndef set_message(server_id: str, name: str, message_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['message'] = message_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_log(server_id: str, name: str, channel_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['log'] = channel_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_category(server_id: str, name: str, category_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['category'] = category_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_count(server_id: str, name: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n count = data[name]['ticket']\n data[name]['ticket'] = count + 1\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef edit_setting(server_id: str, vari: str, new):\n create_server(server_id)\n with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if vari in data:\n data[vari] = new\n with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n",
"step-3": "<mask token>\nos.chdir('/home/niko/data/Marvin')\n\n\ndef edit_user_stats(server_id: str, user_id: str, stat: str, datas):\n create_user(server_id, user_id)\n if os.path.isfile('Server/{}/user.json'.format(server_id)):\n with open('Server/{}/user.json'.format(server_id), 'r') as fp:\n data = json.load(fp)\n data[user_id][stat] = datas\n with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\ndef set_message(server_id: str, name: str, message_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['message'] = message_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_log(server_id: str, name: str, channel_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['log'] = channel_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_category(server_id: str, name: str, category_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['category'] = category_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_count(server_id: str, name: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n count = data[name]['ticket']\n data[name]['ticket'] = count + 1\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef edit_setting(server_id: str, vari: str, new):\n create_server(server_id)\n with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if vari in data:\n data[vari] = new\n with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n",
"step-4": "import json\nimport os\nfrom lib.create import create_server, create_user\nos.chdir('/home/niko/data/Marvin')\n\n\ndef edit_user_stats(server_id: str, user_id: str, stat: str, datas):\n create_user(server_id, user_id)\n if os.path.isfile('Server/{}/user.json'.format(server_id)):\n with open('Server/{}/user.json'.format(server_id), 'r') as fp:\n data = json.load(fp)\n data[user_id][stat] = datas\n with open('Server/{}/user.json'.format(server_id, user_id), 'w') as fp:\n json.dump(data, fp, indent=4)\n\n\ndef set_message(server_id: str, name: str, message_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['message'] = message_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_log(server_id: str, name: str, channel_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['log'] = channel_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_category(server_id: str, name: str, category_id: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n data[name]['category'] = category_id\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef set_count(server_id: str, name: str):\n create_server(server_id)\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if name in data:\n count = data[name]['ticket']\n data[name]['ticket'] = count + 1\n with open('Server/{}/ticket.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n\n\ndef edit_setting(server_id: str, vari: str, new):\n create_server(server_id)\n with open('Server/{}/settings.json'.format(server_id), encoding='utf-8'\n ) as fp:\n data = json.load(fp)\n if vari in data:\n data[vari] = new\n with open('Server/{}/settings.json'.format(server_id), 'w+') as fp:\n json.dump(data, fp, indent=4)\n else:\n return False\n",
"step-5": "import json\r\nimport os\r\n\r\nfrom lib.create import create_server, create_user\r\n\r\nos.chdir(r'/home/niko/data/Marvin')\r\n\r\n\r\ndef edit_user_stats(server_id: str, user_id: str, stat: str, datas):\r\n create_user(server_id, user_id)\r\n if os.path.isfile(\"Server/{}/user.json\".format(server_id)):\r\n with open(\"Server/{}/user.json\".format(server_id), 'r') as fp:\r\n data = json.load(fp)\r\n data[user_id][stat] = datas\r\n with open(\"Server/{}/user.json\".format(server_id, user_id), 'w') as fp:\r\n json.dump(data, fp, indent=4)\r\n\r\n\r\ndef set_message(server_id: str, name: str, message_id: str):\r\n create_server(server_id)\r\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:\r\n data = json.load(fp)\r\n if name in data:\r\n data[name]['message'] = message_id\r\n with open('Server/{}/ticket.json'.format(server_id), \"w+\") as fp:\r\n json.dump(data, fp, indent=4)\r\n else:\r\n return False\r\n\r\n\r\ndef set_log(server_id: str, name: str, channel_id: str):\r\n create_server(server_id)\r\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:\r\n data = json.load(fp)\r\n if name in data:\r\n data[name]['log'] = channel_id\r\n with open('Server/{}/ticket.json'.format(server_id), \"w+\") as fp:\r\n json.dump(data, fp, indent=4)\r\n else:\r\n return False\r\n\r\n\r\ndef set_category(server_id: str, name: str, category_id: str):\r\n create_server(server_id)\r\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:\r\n data = json.load(fp)\r\n if name in data:\r\n data[name]['category'] = category_id\r\n with open('Server/{}/ticket.json'.format(server_id), \"w+\") as fp:\r\n json.dump(data, fp, indent=4)\r\n else:\r\n return False\r\n\r\n\r\ndef set_count(server_id: str, name: str):\r\n create_server(server_id)\r\n with open('Server/{}/ticket.json'.format(server_id), encoding='utf-8') as fp:\r\n data = json.load(fp)\r\n if name in data:\r\n count = data[name]['ticket']\r\n data[name]['ticket'] = count + 1\r\n with open('Server/{}/ticket.json'.format(server_id), \"w+\") as fp:\r\n json.dump(data, fp, indent=4)\r\n else:\r\n return False\r\n\r\n\r\ndef edit_setting(server_id: str, vari: str, new):\r\n create_server(server_id)\r\n with open('Server/{}/settings.json'.format(server_id), encoding='utf-8') as fp:\r\n data = json.load(fp)\r\n if vari in data:\r\n data[vari] = new\r\n with open('Server/{}/settings.json'.format(server_id), \"w+\") as fp:\r\n json.dump(data, fp, indent=4)\r\n else:\r\n return False\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#Alexis Langlois
'''
Fichier de test pour l'algorithme Adaboost avec arbres de décision (@nbTrees).
'''
import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from adaboost_trees import AdaboostTrees
#Trees
nbTrees = 20
#Train dataset
X = np.loadtxt('train_data')
y = np.loadtxt('train_labels')
X, y = shuffle(X, y)
#Data normalization
X -= X.min()
X /= X.max()
#Instanciation
forest = AdaboostTrees(nbTrees)
#Training
forest.train(X, y)
#Test dataset
X = np.loadtxt('test_data')
y = np.loadtxt('test_labels')
X, y = shuffle(X, y)
#Data normalization
X -= X.min()
X /= X.max()
#Predictions
predictions = forest.predict(X)
#Report
print classification_report(y, predicted)
print 'Accuracy: ' + str(accuracy_score(tags, preds))
|
normal
|
{
"blob_id": "b750673829873c136826ae539900451559c042c8",
"index": 5398,
"step-1": "#Alexis Langlois\n'''\nFichier de test pour l'algorithme Adaboost avec arbres de décision (@nbTrees).\n'''\n\nimport numpy as np\n\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import accuracy_score\nfrom adaboost_trees import AdaboostTrees\n\n\n#Trees\nnbTrees = 20\n\n\n#Train dataset\nX = np.loadtxt('train_data')\ny = np.loadtxt('train_labels')\nX, y = shuffle(X, y)\n\n\n#Data normalization\nX -= X.min()\nX /= X.max()\n\n\n#Instanciation\nforest = AdaboostTrees(nbTrees)\n\n\n#Training\nforest.train(X, y)\n\n\n#Test dataset\nX = np.loadtxt('test_data')\ny = np.loadtxt('test_labels')\nX, y = shuffle(X, y)\n\n\n#Data normalization\nX -= X.min()\nX /= X.max()\n\n\n#Predictions\npredictions = forest.predict(X)\n\n\n#Report\nprint classification_report(y, predicted)\nprint 'Accuracy: ' + str(accuracy_score(tags, preds))",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class InvitationSerializer(ModelSerializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Invitation
fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'
class TeamSerializer(ModelSerializer):
slug = SlugField(required=False, validators=[UniqueValidator(queryset=
Team.objects.all())])
members = MembershipSerializer(source='membership_set', many=True,
read_only=True)
invitations = InvitationSerializer(many=True, read_only=True, source=
'pending_invitations')
dashboard_url = ReadOnlyField()
class Meta:
model = Team
fields = ('id', 'name', 'slug', 'members', 'invitations',
'dashboard_url')
def create(self, validated_data):
team_name = validated_data.get('name', None)
validated_data['slug'] = validated_data.get('slug',
get_next_unique_team_slug(team_name))
return super().create(validated_data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MembershipSerializer(ModelSerializer):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
model = Membership
fields = 'id', 'first_name', 'last_name', 'display_name', 'role'
class InvitationSerializer(ModelSerializer):
id = ReadOnlyField()
invited_by = ReadOnlyField(source='invited_by.get_display_name')
class Meta:
model = Invitation
fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'
class TeamSerializer(ModelSerializer):
slug = SlugField(required=False, validators=[UniqueValidator(queryset=
Team.objects.all())])
members = MembershipSerializer(source='membership_set', many=True,
read_only=True)
invitations = InvitationSerializer(many=True, read_only=True, source=
'pending_invitations')
dashboard_url = ReadOnlyField()
class Meta:
model = Team
fields = ('id', 'name', 'slug', 'members', 'invitations',
'dashboard_url')
def create(self, validated_data):
team_name = validated_data.get('name', None)
validated_data['slug'] = validated_data.get('slug',
get_next_unique_team_slug(team_name))
return super().create(validated_data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IllumiDeskUserSerializer(ModelSerializer):
class Meta:
model = IllumiDeskUser
fields = 'first_name', 'last_name', 'get_display_name'
abstract = True
class MembershipSerializer(ModelSerializer):
first_name = ReadOnlyField(source='user.first_name')
last_name = ReadOnlyField(source='user.last_name')
display_name = ReadOnlyField(source='user.get_display_name')
class Meta:
model = Membership
fields = 'id', 'first_name', 'last_name', 'display_name', 'role'
class InvitationSerializer(ModelSerializer):
id = ReadOnlyField()
invited_by = ReadOnlyField(source='invited_by.get_display_name')
class Meta:
model = Invitation
fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'
class TeamSerializer(ModelSerializer):
slug = SlugField(required=False, validators=[UniqueValidator(queryset=
Team.objects.all())])
members = MembershipSerializer(source='membership_set', many=True,
read_only=True)
invitations = InvitationSerializer(many=True, read_only=True, source=
'pending_invitations')
dashboard_url = ReadOnlyField()
class Meta:
model = Team
fields = ('id', 'name', 'slug', 'members', 'invitations',
'dashboard_url')
def create(self, validated_data):
team_name = validated_data.get('name', None)
validated_data['slug'] = validated_data.get('slug',
get_next_unique_team_slug(team_name))
return super().create(validated_data)
<|reserved_special_token_1|>
from rest_framework.serializers import ModelSerializer
from rest_framework.serializers import ReadOnlyField
from rest_framework.serializers import SlugField
from rest_framework.validators import UniqueValidator
from django.db import models
from illumidesk.teams.util import get_next_unique_team_slug
from illumidesk.users.models import IllumiDeskUser
from .models import Invitation
from .models import Membership
from .models import Team
class IllumiDeskUserSerializer(ModelSerializer):
class Meta:
model = IllumiDeskUser
fields = 'first_name', 'last_name', 'get_display_name'
abstract = True
class MembershipSerializer(ModelSerializer):
first_name = ReadOnlyField(source='user.first_name')
last_name = ReadOnlyField(source='user.last_name')
display_name = ReadOnlyField(source='user.get_display_name')
class Meta:
model = Membership
fields = 'id', 'first_name', 'last_name', 'display_name', 'role'
class InvitationSerializer(ModelSerializer):
id = ReadOnlyField()
invited_by = ReadOnlyField(source='invited_by.get_display_name')
class Meta:
model = Invitation
fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'
class TeamSerializer(ModelSerializer):
slug = SlugField(required=False, validators=[UniqueValidator(queryset=
Team.objects.all())])
members = MembershipSerializer(source='membership_set', many=True,
read_only=True)
invitations = InvitationSerializer(many=True, read_only=True, source=
'pending_invitations')
dashboard_url = ReadOnlyField()
class Meta:
model = Team
fields = ('id', 'name', 'slug', 'members', 'invitations',
'dashboard_url')
def create(self, validated_data):
team_name = validated_data.get('name', None)
validated_data['slug'] = validated_data.get('slug',
get_next_unique_team_slug(team_name))
return super().create(validated_data)
<|reserved_special_token_1|>
from rest_framework.serializers import ModelSerializer
from rest_framework.serializers import ReadOnlyField
from rest_framework.serializers import SlugField
from rest_framework.validators import UniqueValidator
from django.db import models
from illumidesk.teams.util import get_next_unique_team_slug
from illumidesk.users.models import IllumiDeskUser
from .models import Invitation
from .models import Membership
from .models import Team
class IllumiDeskUserSerializer(ModelSerializer):
class Meta:
model = IllumiDeskUser
fields = ('first_name', 'last_name', 'get_display_name')
abstract = True
class MembershipSerializer(ModelSerializer):
first_name = ReadOnlyField(source='user.first_name')
last_name = ReadOnlyField(source='user.last_name')
display_name = ReadOnlyField(source='user.get_display_name')
class Meta:
model = Membership
fields = ('id', 'first_name', 'last_name', 'display_name', 'role')
class InvitationSerializer(ModelSerializer):
id = ReadOnlyField()
invited_by = ReadOnlyField(source='invited_by.get_display_name')
class Meta:
model = Invitation
fields = ('id', 'team', 'email', 'role', 'invited_by', 'is_accepted')
class TeamSerializer(ModelSerializer):
slug = SlugField(
required=False,
validators=[UniqueValidator(queryset=Team.objects.all())],
)
members = MembershipSerializer(source='membership_set', many=True, read_only=True)
invitations = InvitationSerializer(many=True, read_only=True, source='pending_invitations')
dashboard_url = ReadOnlyField()
class Meta:
model = Team
fields = ('id', 'name', 'slug', 'members', 'invitations', 'dashboard_url')
def create(self, validated_data):
team_name = validated_data.get("name", None)
validated_data['slug'] = validated_data.get("slug", get_next_unique_team_slug(team_name))
return super().create(validated_data)
|
flexible
|
{
"blob_id": "c005ae9dc8b50e24d72dbc99329bb5585d617081",
"index": 5590,
"step-1": "<mask token>\n\n\nclass InvitationSerializer(ModelSerializer):\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Invitation\n fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'\n\n\nclass TeamSerializer(ModelSerializer):\n slug = SlugField(required=False, validators=[UniqueValidator(queryset=\n Team.objects.all())])\n members = MembershipSerializer(source='membership_set', many=True,\n read_only=True)\n invitations = InvitationSerializer(many=True, read_only=True, source=\n 'pending_invitations')\n dashboard_url = ReadOnlyField()\n\n\n class Meta:\n model = Team\n fields = ('id', 'name', 'slug', 'members', 'invitations',\n 'dashboard_url')\n\n def create(self, validated_data):\n team_name = validated_data.get('name', None)\n validated_data['slug'] = validated_data.get('slug',\n get_next_unique_team_slug(team_name))\n return super().create(validated_data)\n",
"step-2": "<mask token>\n\n\nclass MembershipSerializer(ModelSerializer):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n model = Membership\n fields = 'id', 'first_name', 'last_name', 'display_name', 'role'\n\n\nclass InvitationSerializer(ModelSerializer):\n id = ReadOnlyField()\n invited_by = ReadOnlyField(source='invited_by.get_display_name')\n\n\n class Meta:\n model = Invitation\n fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'\n\n\nclass TeamSerializer(ModelSerializer):\n slug = SlugField(required=False, validators=[UniqueValidator(queryset=\n Team.objects.all())])\n members = MembershipSerializer(source='membership_set', many=True,\n read_only=True)\n invitations = InvitationSerializer(many=True, read_only=True, source=\n 'pending_invitations')\n dashboard_url = ReadOnlyField()\n\n\n class Meta:\n model = Team\n fields = ('id', 'name', 'slug', 'members', 'invitations',\n 'dashboard_url')\n\n def create(self, validated_data):\n team_name = validated_data.get('name', None)\n validated_data['slug'] = validated_data.get('slug',\n get_next_unique_team_slug(team_name))\n return super().create(validated_data)\n",
"step-3": "<mask token>\n\n\nclass IllumiDeskUserSerializer(ModelSerializer):\n\n\n class Meta:\n model = IllumiDeskUser\n fields = 'first_name', 'last_name', 'get_display_name'\n abstract = True\n\n\nclass MembershipSerializer(ModelSerializer):\n first_name = ReadOnlyField(source='user.first_name')\n last_name = ReadOnlyField(source='user.last_name')\n display_name = ReadOnlyField(source='user.get_display_name')\n\n\n class Meta:\n model = Membership\n fields = 'id', 'first_name', 'last_name', 'display_name', 'role'\n\n\nclass InvitationSerializer(ModelSerializer):\n id = ReadOnlyField()\n invited_by = ReadOnlyField(source='invited_by.get_display_name')\n\n\n class Meta:\n model = Invitation\n fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'\n\n\nclass TeamSerializer(ModelSerializer):\n slug = SlugField(required=False, validators=[UniqueValidator(queryset=\n Team.objects.all())])\n members = MembershipSerializer(source='membership_set', many=True,\n read_only=True)\n invitations = InvitationSerializer(many=True, read_only=True, source=\n 'pending_invitations')\n dashboard_url = ReadOnlyField()\n\n\n class Meta:\n model = Team\n fields = ('id', 'name', 'slug', 'members', 'invitations',\n 'dashboard_url')\n\n def create(self, validated_data):\n team_name = validated_data.get('name', None)\n validated_data['slug'] = validated_data.get('slug',\n get_next_unique_team_slug(team_name))\n return super().create(validated_data)\n",
"step-4": "from rest_framework.serializers import ModelSerializer\nfrom rest_framework.serializers import ReadOnlyField\nfrom rest_framework.serializers import SlugField\nfrom rest_framework.validators import UniqueValidator\nfrom django.db import models\nfrom illumidesk.teams.util import get_next_unique_team_slug\nfrom illumidesk.users.models import IllumiDeskUser\nfrom .models import Invitation\nfrom .models import Membership\nfrom .models import Team\n\n\nclass IllumiDeskUserSerializer(ModelSerializer):\n\n\n class Meta:\n model = IllumiDeskUser\n fields = 'first_name', 'last_name', 'get_display_name'\n abstract = True\n\n\nclass MembershipSerializer(ModelSerializer):\n first_name = ReadOnlyField(source='user.first_name')\n last_name = ReadOnlyField(source='user.last_name')\n display_name = ReadOnlyField(source='user.get_display_name')\n\n\n class Meta:\n model = Membership\n fields = 'id', 'first_name', 'last_name', 'display_name', 'role'\n\n\nclass InvitationSerializer(ModelSerializer):\n id = ReadOnlyField()\n invited_by = ReadOnlyField(source='invited_by.get_display_name')\n\n\n class Meta:\n model = Invitation\n fields = 'id', 'team', 'email', 'role', 'invited_by', 'is_accepted'\n\n\nclass TeamSerializer(ModelSerializer):\n slug = SlugField(required=False, validators=[UniqueValidator(queryset=\n Team.objects.all())])\n members = MembershipSerializer(source='membership_set', many=True,\n read_only=True)\n invitations = InvitationSerializer(many=True, read_only=True, source=\n 'pending_invitations')\n dashboard_url = ReadOnlyField()\n\n\n class Meta:\n model = Team\n fields = ('id', 'name', 'slug', 'members', 'invitations',\n 'dashboard_url')\n\n def create(self, validated_data):\n team_name = validated_data.get('name', None)\n validated_data['slug'] = validated_data.get('slug',\n get_next_unique_team_slug(team_name))\n return super().create(validated_data)\n",
"step-5": "from rest_framework.serializers import ModelSerializer\nfrom rest_framework.serializers import ReadOnlyField\nfrom rest_framework.serializers import SlugField\nfrom rest_framework.validators import UniqueValidator\n\nfrom django.db import models\n\nfrom illumidesk.teams.util import get_next_unique_team_slug\nfrom illumidesk.users.models import IllumiDeskUser\n\nfrom .models import Invitation\nfrom .models import Membership\nfrom .models import Team\n\n\nclass IllumiDeskUserSerializer(ModelSerializer):\n class Meta:\n model = IllumiDeskUser\n fields = ('first_name', 'last_name', 'get_display_name')\n abstract = True\n\nclass MembershipSerializer(ModelSerializer):\n first_name = ReadOnlyField(source='user.first_name')\n last_name = ReadOnlyField(source='user.last_name')\n display_name = ReadOnlyField(source='user.get_display_name')\n\n class Meta:\n model = Membership\n fields = ('id', 'first_name', 'last_name', 'display_name', 'role')\n\n\nclass InvitationSerializer(ModelSerializer):\n id = ReadOnlyField()\n invited_by = ReadOnlyField(source='invited_by.get_display_name')\n\n class Meta:\n model = Invitation\n fields = ('id', 'team', 'email', 'role', 'invited_by', 'is_accepted')\n\n\nclass TeamSerializer(ModelSerializer):\n slug = SlugField(\n required=False,\n validators=[UniqueValidator(queryset=Team.objects.all())],\n )\n members = MembershipSerializer(source='membership_set', many=True, read_only=True)\n invitations = InvitationSerializer(many=True, read_only=True, source='pending_invitations')\n dashboard_url = ReadOnlyField()\n\n class Meta:\n model = Team\n fields = ('id', 'name', 'slug', 'members', 'invitations', 'dashboard_url')\n\n def create(self, validated_data):\n team_name = validated_data.get(\"name\", None)\n validated_data['slug'] = validated_data.get(\"slug\", get_next_unique_team_slug(team_name))\n return super().create(validated_data)\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
class Order:
"""
Initiated a new order for the store
"""
def __init__(self, order_number, product_id, item_type, name, product_details, factory, quantity, holiday):
"""
Construct a new order
:param order_number: str
:param product_id: str
:param item_type: str
:param name: str
:param product_details: str
:param factory: Factory
:param quantity: int
:param holiday: str
"""
self._order_number = order_number
self._product_id = product_id
self._item_type = item_type
self._name = name
self._product_details = product_details
self._factory = factory
self._quantity = quantity
self._holiday = holiday
self._is_valid = True
self._invalid_notes = ""
@property
def quantity(self):
"""
Return quantity of the order.
:return: int
"""
return self._quantity
@property
def order_num(self):
"""
Return order num of the order.
:return: str
"""
return self._order_number
@property
def product_id(self):
"""
Return product id of the order.
:return: str
"""
return self._product_id
@property
def item_type(self):
"""
Return item type of the order.
:return: str
"""
return self._item_type
@property
def name(self):
"""
Return item name of the order.
:return: str
"""
return self._name
@property
def product_details(self):
"""
Return other details of the item of the order.
:return: str
"""
return self._product_details
@property
def factory(self):
"""
Return the factory that can generate the item.
:return: Factory
"""
return self._factory
@property
def holiday(self):
"""
Return the holiday that the item for.
:return: str
"""
return self._holiday
@property
def invalid_notes(self):
"""
Return the invalid notes if the item is invalid.
:return: str
"""
return self._invalid_notes
@property
def is_valid(self):
"""
Return the valid status.
:return: str
"""
return self._is_valid
def is_invalid(self):
"""
Set the status to invalid.
"""
self._is_valid = False
def set_invalid_notes(self, error):
"""
Set the invalid notes.
:param error: str
"""
self._invalid_notes = error
def __str__(self):
"""
String method of the class.
"""
return f"Order Number: {self._order_number} " \
f"Product ID: {self._product_id} " \
f"Item: {self._item_type} " \
f"Name: {self._name} " \
f"Quantity: {self._quantity} " \
f"Product details: {self._product_details} "
|
normal
|
{
"blob_id": "0dce4ea8ef21f2535194330b82ce5706ae694247",
"index": 4676,
"step-1": "class Order:\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n <mask token>\n <mask token>\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n <mask token>\n <mask token>\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n <mask token>\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n <mask token>\n",
"step-2": "class Order:\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n <mask token>\n <mask token>\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n <mask token>\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return (\n f'Order Number: {self._order_number} Product ID: {self._product_id} Item: {self._item_type} Name: {self._name} Quantity: {self._quantity} Product details: {self._product_details} '\n )\n",
"step-3": "class Order:\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n\n @property\n def product_details(self):\n \"\"\"\n Return other details of the item of the order.\n :return: str\n \"\"\"\n return self._product_details\n <mask token>\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n <mask token>\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return (\n f'Order Number: {self._order_number} Product ID: {self._product_id} Item: {self._item_type} Name: {self._name} Quantity: {self._quantity} Product details: {self._product_details} '\n )\n",
"step-4": "class Order:\n <mask token>\n\n def __init__(self, order_number, product_id, item_type, name,\n product_details, factory, quantity, holiday):\n \"\"\"\n Construct a new order\n :param order_number: str\n :param product_id: str\n :param item_type: str\n :param name: str\n :param product_details: str\n :param factory: Factory\n :param quantity: int\n :param holiday: str\n \"\"\"\n self._order_number = order_number\n self._product_id = product_id\n self._item_type = item_type\n self._name = name\n self._product_details = product_details\n self._factory = factory\n self._quantity = quantity\n self._holiday = holiday\n self._is_valid = True\n self._invalid_notes = ''\n\n @property\n def quantity(self):\n \"\"\"\n Return quantity of the order.\n :return: int\n \"\"\"\n return self._quantity\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n\n @property\n def product_details(self):\n \"\"\"\n Return other details of the item of the order.\n :return: str\n \"\"\"\n return self._product_details\n\n @property\n def factory(self):\n \"\"\"\n Return the factory that can generate the item.\n :return: Factory\n \"\"\"\n return self._factory\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n\n @property\n def invalid_notes(self):\n \"\"\"\n Return the invalid notes if the item is invalid.\n :return: str\n \"\"\"\n return self._invalid_notes\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return (\n f'Order Number: {self._order_number} Product ID: {self._product_id} Item: {self._item_type} Name: {self._name} Quantity: {self._quantity} Product details: {self._product_details} '\n )\n",
"step-5": "class Order:\n \"\"\"\n Initiated a new order for the store\n \"\"\"\n\n def __init__(self, order_number, product_id, item_type, name, product_details, factory, quantity, holiday):\n \"\"\"\n Construct a new order\n :param order_number: str\n :param product_id: str\n :param item_type: str\n :param name: str\n :param product_details: str\n :param factory: Factory\n :param quantity: int\n :param holiday: str\n \"\"\"\n self._order_number = order_number\n self._product_id = product_id\n self._item_type = item_type\n self._name = name\n self._product_details = product_details\n self._factory = factory\n self._quantity = quantity\n self._holiday = holiday\n self._is_valid = True\n self._invalid_notes = \"\"\n\n @property\n def quantity(self):\n \"\"\"\n Return quantity of the order.\n :return: int\n \"\"\"\n return self._quantity\n\n @property\n def order_num(self):\n \"\"\"\n Return order num of the order.\n :return: str\n \"\"\"\n return self._order_number\n\n @property\n def product_id(self):\n \"\"\"\n Return product id of the order.\n :return: str\n \"\"\"\n return self._product_id\n\n @property\n def item_type(self):\n \"\"\"\n Return item type of the order.\n :return: str\n \"\"\"\n return self._item_type\n\n @property\n def name(self):\n \"\"\"\n Return item name of the order.\n :return: str\n \"\"\"\n return self._name\n\n @property\n def product_details(self):\n \"\"\"\n Return other details of the item of the order.\n :return: str\n \"\"\"\n return self._product_details\n\n @property\n def factory(self):\n \"\"\"\n Return the factory that can generate the item.\n :return: Factory\n \"\"\"\n return self._factory\n\n @property\n def holiday(self):\n \"\"\"\n Return the holiday that the item for.\n :return: str\n \"\"\"\n return self._holiday\n\n @property\n def invalid_notes(self):\n \"\"\"\n Return the invalid notes if the item is invalid.\n :return: str\n \"\"\"\n return self._invalid_notes\n\n @property\n def is_valid(self):\n \"\"\"\n Return the valid status.\n :return: str\n \"\"\"\n return self._is_valid\n\n def is_invalid(self):\n \"\"\"\n Set the status to invalid.\n \"\"\"\n self._is_valid = False\n\n def set_invalid_notes(self, error):\n \"\"\"\n Set the invalid notes.\n :param error: str\n \"\"\"\n self._invalid_notes = error\n\n def __str__(self):\n \"\"\"\n String method of the class.\n \"\"\"\n return f\"Order Number: {self._order_number} \" \\\n f\"Product ID: {self._product_id} \" \\\n f\"Item: {self._item_type} \" \\\n f\"Name: {self._name} \" \\\n f\"Quantity: {self._quantity} \" \\\n f\"Product details: {self._product_details} \"\n",
"step-ids": [
7,
10,
11,
15,
17
]
}
|
[
7,
10,
11,
15,
17
] |
"""Given an integer array arr and an integer difference, return the length of
the longest subsequence in arr which is an arithmetic sequence such that the
difference between adjacent elements in the subsequence equals difference."""
class Solution(object):
def longestSubsequence(self, arr, difference):
dp = dict()
mx = 0
for num in arr:
if num - difference in dp:
dp[num] = 1 + dp[num-difference]
else:
dp[num] = 1
mx = max(dp[num],mx)
return mx
|
normal
|
{
"blob_id": "fa4ab3ed5c653633879b5ba2c078c896aa3eb0c6",
"index": 2838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def longestSubsequence(self, arr, difference):\n dp = dict()\n mx = 0\n for num in arr:\n if num - difference in dp:\n dp[num] = 1 + dp[num - difference]\n else:\n dp[num] = 1\n mx = max(dp[num], mx)\n return mx\n",
"step-4": "\"\"\"Given an integer array arr and an integer difference, return the length of \nthe longest subsequence in arr which is an arithmetic sequence such that the \ndifference between adjacent elements in the subsequence equals difference.\"\"\"\n\n\nclass Solution(object):\n def longestSubsequence(self, arr, difference):\n dp = dict()\n mx = 0\n for num in arr:\n if num - difference in dp:\n dp[num] = 1 + dp[num-difference]\n else:\n dp[num] = 1\n mx = max(dp[num],mx)\n return mx\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# 数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。
#
# 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。
#
# 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。
#
# 示例 1:
#
# 输入: cost = [10, 15, 20]
# 输出: 15
# 解释: 最低花费是从cost[1]开始,然后走两步即可到阶梯顶,一共花费15。
#
#
# 示例 2:
#
# 输入: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]
# 输出: 6
# 解释: 最低花费方式是从cost[0]开始,逐个经过那些1,跳过cost[3],一共花费6。
#
#
# 注意:
#
#
# cost 的长度将会在 [2, 1000]。
# 每一个 cost[i] 将会是一个Integer类型,范围为 [0, 999]。
#
# Related Topics 数组 动态规划
# leetcode submit region begin(Prohibit modification and deletion)
class Solution(object):
def minCostClimbingStairs(self, cost):
"""
:type cost: List[int]
:rtype: int
"""
# f1 = f2 = 0
# for x in reversed(cost):
# f1, f2 = x + min(f1, f2), f1
# return min(f1, f2)
result = [0 for _ in range(len(cost))]
result[0] = cost[0]
result[1] = cost[1]
for j in range(2, len(result)):
result[j] = min(result[j - 1], result[j - 2]) + cost[j]
return min(result[-2], result[-1])
if __name__ == '__main__':
solution = Solution()
costs = [10, 15, 20]
res = solution.minCostClimbingStairs(costs)
print(res)
|
normal
|
{
"blob_id": "38363316cc9a8419a528bb78b9ad03682e24172d",
"index": 9823,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution(object):\n\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n result = [(0) for _ in range(len(cost))]\n result[0] = cost[0]\n result[1] = cost[1]\n for j in range(2, len(result)):\n result[j] = min(result[j - 1], result[j - 2]) + cost[j]\n return min(result[-2], result[-1])\n\n\n<mask token>\n",
"step-4": "class Solution(object):\n\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n result = [(0) for _ in range(len(cost))]\n result[0] = cost[0]\n result[1] = cost[1]\n for j in range(2, len(result)):\n result[j] = min(result[j - 1], result[j - 2]) + cost[j]\n return min(result[-2], result[-1])\n\n\nif __name__ == '__main__':\n solution = Solution()\n costs = [10, 15, 20]\n res = solution.minCostClimbingStairs(costs)\n print(res)\n",
"step-5": "# 数组的每个索引作为一个阶梯,第 i个阶梯对应着一个非负数的体力花费值 cost[i](索引从0开始)。\n#\n# 每当你爬上一个阶梯你都要花费对应的体力花费值,然后你可以选择继续爬一个阶梯或者爬两个阶梯。\n#\n# 您需要找到达到楼层顶部的最低花费。在开始时,你可以选择从索引为 0 或 1 的元素作为初始阶梯。\n#\n# 示例 1:\n#\n# 输入: cost = [10, 15, 20]\n# 输出: 15\n# 解释: 最低花费是从cost[1]开始,然后走两步即可到阶梯顶,一共花费15。\n#\n#\n# 示例 2:\n#\n# 输入: cost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]\n# 输出: 6\n# 解释: 最低花费方式是从cost[0]开始,逐个经过那些1,跳过cost[3],一共花费6。\n#\n#\n# 注意:\n#\n#\n# cost 的长度将会在 [2, 1000]。\n# 每一个 cost[i] 将会是一个Integer类型,范围为 [0, 999]。\n#\n# Related Topics 数组 动态规划\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution(object):\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n\n # f1 = f2 = 0\n # for x in reversed(cost):\n # f1, f2 = x + min(f1, f2), f1\n # return min(f1, f2)\n\n result = [0 for _ in range(len(cost))]\n result[0] = cost[0]\n result[1] = cost[1]\n for j in range(2, len(result)):\n result[j] = min(result[j - 1], result[j - 2]) + cost[j]\n return min(result[-2], result[-1])\n\n\n\n\nif __name__ == '__main__':\n solution = Solution()\n costs = [10, 15, 20]\n res = solution.minCostClimbingStairs(costs)\n print(res)\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |