index int64 0–10k | blob_id stringlengths 40–40 | step-1 stringlengths 13–984k | step-2 stringlengths 6–1.23M ⌀ | step-3 stringlengths 15–1.34M ⌀ | step-4 stringlengths 30–1.34M ⌀ | step-5 stringlengths 64–1.2M ⌀ | step-ids listlengths 1–5 |
---|---|---|---|---|---|---|---|
2,100 |
881d0c0808d8c0e656cdbf49450367553c100630
|
<mask token>
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
        random_word_index = random.randint(0, len(words) - 1)
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
            f'გამოიცანი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
            'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყვეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
<mask token>
|
<mask token>
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
        random_word_index = random.randint(0, len(words) - 1)
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
            f'გამოიცანი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
            'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყვეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
def main():
words_to_play = get_all_words()
print(
"""ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.
შენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით.
"""
)
game(words_to_play)
<mask token>
|
<mask token>
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
        random_word_index = random.randint(0, len(words) - 1)
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
            f'გამოიცანი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
            'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყვეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
def main():
words_to_play = get_all_words()
print(
"""ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.
შენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით.
"""
)
game(words_to_play)
if __name__ == '__main__':
main()
|
import random
def get_all_words():
words = []
with open('poem.txt') as poem:
for line in poem:
line = line.strip().split(' ')
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
        random_word_index = random.randint(0, len(words) - 1)
word_as_list = []
random_word_normal = words[random_word_index]
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = ''.join(word_as_list)
print(
            f'გამოიცანი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}'
)
answer = input(
            'შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყვეტად: ')
if answer.strip().upper() == 'Q':
print(
"""მადლობა თამაშისთვის და გახსოვდეს:
'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'"""
)
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(
f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!"
)
def main():
words_to_play = get_all_words()
print(
"""ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.
შენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით.
"""
)
game(words_to_play)
if __name__ == '__main__':
main()
|
# This is the code that creates the nonsense
import random
def get_all_words():
    words = []  # this is a list of every word
    with open("poem.txt") as poem:  # whose letter count is less than 6
        for line in poem:  # playing with long words turned out to be hard
line = line.strip().split(" ")
for word in line:
if len(word) < 6:
words.append(word)
return words
def game(words):
while True:
        # Randomly pick the word the user will have to guess
        random_word_index = random.randint(0, len(words) - 1)
word_as_list = []
random_word_normal = words[random_word_index]
        # Turn the chosen word into a list, inserting each letter at a random position to scramble it
for x in random_word_normal:
word_as_list.insert(random.randint(0, len(word_as_list)), x)
random_word_funky = "".join(word_as_list)
        print(f'გამოიცანი სიტყვა, რომელიც შედგება შემდეგი ასოებისგან: {random_word_funky}')
        answer = input("შეიყვანეთ სწორი ვერსია ან აკრიფე Q თამაშის შესაწყვეტად: ")
if answer.strip().upper() == "Q":
print("მადლობა თამაშისთვის და გახსოვდეს:"
"\n'თუ თავი შენი შენ გახლავს, ღარიბად არ იხსენები!'")
break
if random_word_normal == answer.strip():
print(f"ყოჩაღ, '{answer}' სწორი პასუხია!")
else:
print(f"'{answer}' არასწორი პასუხია, სწორი პასუხია '{random_word_normal}'!")
def main():
words_to_play = get_all_words()
print('ეკრანზე გამოისახება "ვეფხისტყაოსნიდან" სიტყვები, სადაც ასოები შემთხვევითად არის განაწილებული.'
'\nშენი მისიაა, გამოიცნო რა სიტყვა დაწერა შოთამ ამ ასოებით. \n')
game(words_to_play)
if __name__ == '__main__':
main()
|
[
2,
3,
4,
5,
6
] |
2,101 |
d24bbfc3587a2a79891a11e00ec865498c01c286
|
<mask token>
def DSA_2048(filename, key):
with open(filename, 'rb') as f:
message = f.read()
hash_obj = SHA256.new(message)
signer = DSS.new(key, 'fips-186-3')
signature = signer.sign(hash_obj)
    with open('public_key.pem', 'r') as f:
        pub_key = DSA.import_key(f.read())
    hash_obj = SHA256.new(message)
verifier = DSS.new(pub_key, 'fips-186-3')
try:
verifier.verify(hash_obj, signature)
print('The message is authentic.')
except ValueError:
print('The message is not authentic.')
<mask token>
|
<mask token>
with open('small_file.txt', 'wb') as f:
f.write(os.urandom(kB))
<mask token>
with open('large_file.txt', 'wb') as f:
f.write(os.urandom(mB))
<mask token>
with open('public_key.pem', 'wb') as f:
f.write(key.publickey().export_key())
<mask token>
print('Key Generation Time: ', End - Begin)
def DSA_2048(filename, key):
with open(filename, 'rb') as f:
message = f.read()
hash_obj = SHA256.new(message)
signer = DSS.new(key, 'fips-186-3')
signature = signer.sign(hash_obj)
    with open('public_key.pem', 'r') as f:
        pub_key = DSA.import_key(f.read())
    hash_obj = SHA256.new(message)
verifier = DSS.new(pub_key, 'fips-186-3')
try:
verifier.verify(hash_obj, signature)
print('The message is authentic.')
except ValueError:
print('The message is not authentic.')
<mask token>
DSA_2048('small_file.txt', key)
<mask token>
print('Time taken for DSA_2048 with 1 kb file: ', End - Begin)
if End - Begin != 0:
print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')
<mask token>
DSA_2048('large_file.txt', key)
<mask token>
print('Time taken for DSA_2048 with 10 mb file: ', End - Begin)
if End - Begin != 0:
    print('DSA_2048 speed for 10 mb file: ', 10485760 / (End - Begin),
        'bytes/sec')
exit()
|
<mask token>
kB = 1024
with open('small_file.txt', 'wb') as f:
f.write(os.urandom(kB))
mB = 10485760
with open('large_file.txt', 'wb') as f:
f.write(os.urandom(mB))
Begin = time.time()
key = DSA.generate(2048)
with open('public_key.pem', 'wb') as f:
f.write(key.publickey().export_key())
End = time.time()
print('Key Generation Time: ', End - Begin)
def DSA_2048(filename, key):
with open(filename, 'rb') as f:
message = f.read()
hash_obj = SHA256.new(message)
signer = DSS.new(key, 'fips-186-3')
signature = signer.sign(hash_obj)
    with open('public_key.pem', 'r') as f:
        pub_key = DSA.import_key(f.read())
    hash_obj = SHA256.new(message)
verifier = DSS.new(pub_key, 'fips-186-3')
try:
verifier.verify(hash_obj, signature)
print('The message is authentic.')
except ValueError:
print('The message is not authentic.')
Begin = time.time()
DSA_2048('small_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 1 kb file: ', End - Begin)
if End - Begin != 0:
print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')
Begin = time.time()
DSA_2048('large_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 10 mb file: ', End - Begin)
if End - Begin != 0:
    print('DSA_2048 speed for 10 mb file: ', 10485760 / (End - Begin),
        'bytes/sec')
exit()
|
from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import os
import time
kB = 1024
with open('small_file.txt', 'wb') as f:
f.write(os.urandom(kB))
mB = 10485760
with open('large_file.txt', 'wb') as f:
f.write(os.urandom(mB))
Begin = time.time()
key = DSA.generate(2048)
with open('public_key.pem', 'wb') as f:
f.write(key.publickey().export_key())
End = time.time()
print('Key Generation Time: ', End - Begin)
def DSA_2048(filename, key):
with open(filename, 'rb') as f:
message = f.read()
hash_obj = SHA256.new(message)
signer = DSS.new(key, 'fips-186-3')
signature = signer.sign(hash_obj)
    with open('public_key.pem', 'r') as f:
        pub_key = DSA.import_key(f.read())
    hash_obj = SHA256.new(message)
verifier = DSS.new(pub_key, 'fips-186-3')
try:
verifier.verify(hash_obj, signature)
print('The message is authentic.')
except ValueError:
print('The message is not authentic.')
Begin = time.time()
DSA_2048('small_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 1 kb file: ', End - Begin)
if End - Begin != 0:
print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')
Begin = time.time()
DSA_2048('large_file.txt', key)
End = time.time()
print('Time taken for DSA_2048 with 10 mb file: ', End - Begin)
if End - Begin != 0:
    print('DSA_2048 speed for 10 mb file: ', 10485760 / (End - Begin),
        'bytes/sec')
exit()
|
from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import os
import time
kB = 1024 # 1kB
with open('small_file.txt', 'wb') as f:
f.write(os.urandom(kB))
mB = 10485760 # 10 MB
with open('large_file.txt', 'wb') as f:
f.write(os.urandom(mB))
Begin = time.time()
key = DSA.generate(2048)
with open("public_key.pem", "wb") as f:
f.write(key.publickey().export_key())
End = time.time()
print("Key Generation Time: ", End-Begin)
def DSA_2048(filename,key):
with open(filename, 'rb') as f:
message = f.read()
hash_obj = SHA256.new(message)
signer = DSS.new(key, 'fips-186-3')
signature = signer.sign(hash_obj)
    # Load the public key
    with open("public_key.pem", "r") as f:
        pub_key = DSA.import_key(f.read())
    hash_obj = SHA256.new(message)
verifier = DSS.new(pub_key, 'fips-186-3')
# Verify the authenticity of the message
try:
verifier.verify(hash_obj, signature)
print ("The message is authentic.")
except ValueError:
print ("The message is not authentic.")
Begin=time.time()
DSA_2048('small_file.txt',key)
End=time.time()
print("Time taken for DSA_2048 with 1 kb file: ",End-Begin)
if End-Begin != 0:
print("DSA_2048 speed for 1 kb file: ",1024/(End-Begin),"bytes/sec")
Begin=time.time()
DSA_2048('large_file.txt',key)
End=time.time()
print("Time taken for DSA_2048 with 10 mb file: ",End-Begin)
if End-Begin != 0:
print("DSA_2048 speed for 1 kb file: ",10485760/(End-Begin),"bytes/sec")
exit()
|
[
1,
2,
3,
4,
5
] |
2,102 |
6fc43919f521234d0dc9e167bb72f014e9c0bf17
|
<mask token>
|
<mask token>
class simple_drawing_window1(simple_drawing_window):
<mask token>
def paintEvent(self, e):
p = QPainter()
p.begin(self)
"""
p.setPen(QColor(0,0,0))
p.setBrush(QColor(0,127,0))
p.drawPolygon(
[QPoint(70,100), QPoint(100,110),
QPoint(130, 100), QPoint(100,150),]
)
"""
p.setPen(QColor(255, 127, 0))
p.setBrush(QColor(255, 127, 0))
p.drawPolygon([QPoint(50, 100), QPoint(200, 100), QPoint(200, 400),
QPoint(50, 400)])
p.drawPixmap(QRect(400, 150, 200, 200), self.rabbit)
p.end()
|
<mask token>
class simple_drawing_window1(simple_drawing_window):
def __init__(self):
super().__init__()
def paintEvent(self, e):
p = QPainter()
p.begin(self)
"""
p.setPen(QColor(0,0,0))
p.setBrush(QColor(0,127,0))
p.drawPolygon(
[QPoint(70,100), QPoint(100,110),
QPoint(130, 100), QPoint(100,150),]
)
"""
p.setPen(QColor(255, 127, 0))
p.setBrush(QColor(255, 127, 0))
p.drawPolygon([QPoint(50, 100), QPoint(200, 100), QPoint(200, 400),
QPoint(50, 400)])
p.drawPixmap(QRect(400, 150, 200, 200), self.rabbit)
p.end()
|
import sys
from PySide6.QtCore import *
from PySide6.QtWidgets import *
from PySide6.QtGui import *
from simple_drawing_window import *
class simple_drawing_window1(simple_drawing_window):
def __init__(self):
super().__init__()
def paintEvent(self, e):
p = QPainter()
p.begin(self)
"""
p.setPen(QColor(0,0,0))
p.setBrush(QColor(0,127,0))
p.drawPolygon(
[QPoint(70,100), QPoint(100,110),
QPoint(130, 100), QPoint(100,150),]
)
"""
p.setPen(QColor(255, 127, 0))
p.setBrush(QColor(255, 127, 0))
p.drawPolygon([QPoint(50, 100), QPoint(200, 100), QPoint(200, 400),
QPoint(50, 400)])
p.drawPixmap(QRect(400, 150, 200, 200), self.rabbit)
p.end()
|
import sys
from PySide6.QtCore import *
from PySide6.QtWidgets import *
from PySide6.QtGui import *
from simple_drawing_window import *
class simple_drawing_window1( simple_drawing_window):
def __init__(self):
super().__init__()
def paintEvent(self, e):
p = QPainter()
p.begin(self)
"""
p.setPen(QColor(0,0,0))
p.setBrush(QColor(0,127,0))
p.drawPolygon(
[QPoint(70,100), QPoint(100,110),
QPoint(130, 100), QPoint(100,150),]
)
"""
p.setPen(QColor(255,127,0))
p.setBrush(QColor(255,127,0))
p.drawPolygon(
[QPoint(50,100), QPoint(200,100),QPoint(200,400), QPoint(50,400),]
)
p.drawPixmap(QRect(400,150,200,200), self.rabbit)
p.end()
|
[
0,
2,
3,
4,
5
] |
2,103 |
4f15e2743b33e2f672cd258172da852edb7e4118
|
<mask token>
|
<mask token>
EvinceRelation('different from')
|
from utils import *
EvinceRelation('different from')
|
from utils import *
EvinceRelation("different from")
| null |
[
0,
1,
2,
3
] |
2,104 |
9d8c4bf9f9279d5e30d0e9742cdd31713e5f4b9e
|
<mask token>
|
<mask token>
@app.route('/')
@app.route('/index')
def index():
return 'Hello world'
<mask token>
|
<mask token>
@app.route('/')
@app.route('/index')
def index():
return 'Hello world'
@app.route('/api_post', methods=['POST'])
def postJsonHandler():
print(request.is_json)
content = request.get_json()
print(content)
return 'JSON posted'
|
from app import app
from flask import request
@app.route('/')
@app.route('/index')
def index():
return 'Hello world'
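# Accept a JSON POST, log whether it parsed as JSON, and echo a confirmation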
@app.route('/api_post', methods=['POST'])
def postJsonHandler():
print(request.is_json)
content = request.get_json()
print(content)
return 'JSON posted'
| null |
[
0,
1,
2,
3
] |
2,105 |
166520ab5b9fd5a55dd2aa30b4d62f55096ce6cb
|
<mask token>
def get_gff_from_list(gff_filename, listfile, partial_ok=False):
seqs = [line.strip() for line in open(listfile)]
for r in GFF.collapseGFFReader(gff_filename):
if r.seqid in seqs or r.seqid.split('|')[0
] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs
):
GFF.write_collapseGFF_format(sys.stdout, r)
<mask token>
|
<mask token>
def get_gff_from_list(gff_filename, listfile, partial_ok=False):
seqs = [line.strip() for line in open(listfile)]
for r in GFF.collapseGFFReader(gff_filename):
if r.seqid in seqs or r.seqid.split('|')[0
] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs
):
GFF.write_collapseGFF_format(sys.stdout, r)
@app.command(name='')
def main(gff_filename: str=typer.Argument(..., help=
'Input gff filename to extract sequences from'), list_filename: str=
typer.Argument(..., help='List of sequence IDs to extract'), partial:
bool=typer.Option(False, help='OK if seq IDs only match the beginning'),
version: bool=typer.Option(None, '--version', callback=version_callback,
is_eager=True, help='Prints the version of the SQANTI3 package.')) ->None:
get_gff_from_list(gff_filename, list_filename, partial)
if __name__ == '__main__':
typer.run(main)
|
<mask token>
app = typer.Typer(name='cupcake.sequence.get_gffs_from_list', help=
'Get records from a GFF file from a list')
def get_gff_from_list(gff_filename, listfile, partial_ok=False):
seqs = [line.strip() for line in open(listfile)]
for r in GFF.collapseGFFReader(gff_filename):
if r.seqid in seqs or r.seqid.split('|')[0
] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs
):
GFF.write_collapseGFF_format(sys.stdout, r)
@app.command(name='')
def main(gff_filename: str=typer.Argument(..., help=
'Input gff filename to extract sequences from'), list_filename: str=
typer.Argument(..., help='List of sequence IDs to extract'), partial:
bool=typer.Option(False, help='OK if seq IDs only match the beginning'),
version: bool=typer.Option(None, '--version', callback=version_callback,
is_eager=True, help='Prints the version of the SQANTI3 package.')) ->None:
get_gff_from_list(gff_filename, list_filename, partial)
if __name__ == '__main__':
typer.run(main)
|
import sys
import typer
from cupcake import version_callback
from cupcake.sequence import GFF
app = typer.Typer(name='cupcake.sequence.get_gffs_from_list', help=
'Get records from a GFF file from a list')
def get_gff_from_list(gff_filename, listfile, partial_ok=False):
seqs = [line.strip() for line in open(listfile)]
for r in GFF.collapseGFFReader(gff_filename):
if r.seqid in seqs or r.seqid.split('|')[0
] in seqs or partial_ok and any(r.seqid.startswith(x) for x in seqs
):
GFF.write_collapseGFF_format(sys.stdout, r)
@app.command(name='')
def main(gff_filename: str=typer.Argument(..., help=
'Input gff filename to extract sequences from'), list_filename: str=
typer.Argument(..., help='List of sequence IDs to extract'), partial:
bool=typer.Option(False, help='OK if seq IDs only match the beginning'),
version: bool=typer.Option(None, '--version', callback=version_callback,
is_eager=True, help='Prints the version of the SQANTI3 package.')) ->None:
get_gff_from_list(gff_filename, list_filename, partial)
if __name__ == '__main__':
typer.run(main)
|
#!/usr/bin/env python
import sys
import typer
from cupcake import version_callback
from cupcake.sequence import GFF
app = typer.Typer(
name="cupcake.sequence.get_gffs_from_list",
help="Get records from a GFF file from a list",
)
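# Keep records whose seqid matches the list exactly, by its "|"-prefix, or (with --partial) by leading match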
def get_gff_from_list(gff_filename, listfile, partial_ok=False):
seqs = [line.strip() for line in open(listfile)]
for r in GFF.collapseGFFReader(gff_filename):
if (
r.seqid in seqs
or r.seqid.split("|")[0] in seqs
or (partial_ok and any(r.seqid.startswith(x) for x in seqs))
):
GFF.write_collapseGFF_format(sys.stdout, r)
@app.command(name="")
def main(
gff_filename: str = typer.Argument(
..., help="Input gff filename to extract sequences from"
),
list_filename: str = typer.Argument(..., help="List of sequence IDs to extract"),
partial: bool = typer.Option(
False,
help="OK if seq IDs only match the beginning",
),
version: bool = typer.Option(
None,
"--version",
callback=version_callback,
is_eager=True,
help="Prints the version of the SQANTI3 package.",
),
) -> None:
get_gff_from_list(gff_filename, list_filename, partial)
if __name__ == "__main__":
typer.run(main)
|
[
1,
3,
4,
5,
6
] |
2,106 |
e6884afaae15e903c62eecb3baec868548998080
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = [('words', '0004_auto_20180330_0647')]
operations = [migrations.AddField(model_name='review', name=
'modified_month', field=models.IntegerField(null=True)), migrations
.AddField(model_name='review', name='modified_week', field=models.
IntegerField(null=True)), migrations.AddField(model_name='review',
name='modified_year', field=models.IntegerField(null=True))]
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('words', '0004_auto_20180330_0647')]
operations = [migrations.AddField(model_name='review', name=
'modified_month', field=models.IntegerField(null=True)), migrations
.AddField(model_name='review', name='modified_week', field=models.
IntegerField(null=True)), migrations.AddField(model_name='review',
name='modified_year', field=models.IntegerField(null=True))]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-31 17:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('words', '0004_auto_20180330_0647'),
]
operations = [
migrations.AddField(
model_name='review',
name='modified_month',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='review',
name='modified_week',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='review',
name='modified_year',
field=models.IntegerField(null=True),
),
]
|
[
0,
1,
2,
3,
4
] |
2,107 |
1a42892095d820f1e91ba5e7f2804b5a21e39676
|
<mask token>
def click(valor):
global i
screen.insert(i, valor)
i += 1
<mask token>
def hacer_operacion():
    global i
ecuacion = screen.get()
try:
result = eval(ecuacion)
screen.delete(0, END)
screen.insert(0, result)
i = 0
    except Exception:
screen.delete(0, END)
r = screen.insert(0, 'ERROR')
print(r)
<mask token>
|
<mask token>
root.title('Calculadora LE-1409')
root.iconbitmap('calculadora.ico')
root.geometry('510x480')
root.config(bg='gray42')
root.resizable(False, False)
<mask token>
screen.grid(row=0, column=0, columnspan=5, padx=20, pady=20)
<mask token>
def click(valor):
global i
screen.insert(i, valor)
i += 1
def borrar():
    global i
    screen.delete(0, END)
    i = 0
def hacer_operacion():
    global i
ecuacion = screen.get()
try:
result = eval(ecuacion)
screen.delete(0, END)
screen.insert(0, result)
i = 0
    except Exception:
screen.delete(0, END)
r = screen.insert(0, 'ERROR')
print(r)
<mask token>
Button_Pi.grid(row=1, column=0, padx=10, pady=10)
<mask token>
Button_Left.grid(row=1, column=1, padx=10, pady=10)
<mask token>
Button_Right.grid(row=1, column=2, padx=10, pady=10)
<mask token>
Button_AC.grid(row=1, column=3, padx=10, pady=10)
<mask token>
Button_Div.grid(row=1, column=4, padx=10, pady=10)
<mask token>
Button_Exp.grid(row=2, column=0, padx=10, pady=10)
<mask token>
Button_7.grid(row=2, column=1, padx=10, pady=10)
<mask token>
Button_8.grid(row=2, column=2, padx=10, pady=10)
<mask token>
Button_9.grid(row=2, column=3, padx=10, pady=10)
<mask token>
Button_Multi.grid(row=2, column=4, padx=10, pady=10)
<mask token>
Button_Raiz.grid(row=3, column=0, padx=10, pady=10)
<mask token>
Button_4.grid(row=3, column=1, padx=10, pady=10)
<mask token>
Button_5.grid(row=3, column=2, padx=10, pady=10)
<mask token>
Button_6.grid(row=3, column=3, padx=10, pady=10)
<mask token>
Button_Menos.grid(row=3, column=4, padx=10, pady=10)
<mask token>
Button_LN.grid(row=4, column=0, padx=10, pady=10)
<mask token>
Button_1.grid(row=4, column=1, padx=10, pady=10)
<mask token>
Button_2.grid(row=4, column=2, padx=10, pady=10)
<mask token>
Button_3.grid(row=4, column=3, padx=10, pady=10)
<mask token>
Button_Mas.grid(row=4, column=4, padx=10, pady=10)
<mask token>
Button_Point.grid(row=5, column=0, padx=10, pady=10)
<mask token>
Button_0.grid(row=5, column=1, padx=10, pady=10)
<mask token>
Button_Igual.grid(row=5, column=2, columnspan=3, padx=10, pady=10)
root.mainloop()
|
<mask token>
root = Tk()
root.title('Calculadora LE-1409')
root.iconbitmap('calculadora.ico')
root.geometry('510x480')
root.config(bg='gray42')
root.resizable(False, False)
screen = Entry(root, font=('arial', 20, 'bold'), width=22, borderwidth=10,
background='CadetBlue1', justify='right')
screen.grid(row=0, column=0, columnspan=5, padx=20, pady=20)
i = 0
def click(valor):
global i
screen.insert(i, valor)
i += 1
def borrar():
    global i
    screen.delete(0, END)
    i = 0
def hacer_operacion():
    global i
ecuacion = screen.get()
try:
result = eval(ecuacion)
screen.delete(0, END)
screen.insert(0, result)
i = 0
    except Exception:
screen.delete(0, END)
r = screen.insert(0, 'ERROR')
print(r)
button_color = 'gray99'
width_button = 10
height_button = 3
Button_Pi = Button(root, text='π', bg=button_color, width=width_button,
height=height_button, command=lambda : click('pi'))
Button_Pi.grid(row=1, column=0, padx=10, pady=10)
Button_Left = Button(root, text='(', bg=button_color, width=width_button,
height=height_button, command=lambda : click('('))
Button_Left.grid(row=1, column=1, padx=10, pady=10)
Button_Right = Button(root, text=')', bg=button_color, width=width_button,
height=height_button, command=lambda : click(')'))
Button_Right.grid(row=1, column=2, padx=10, pady=10)
Button_AC = Button(root, text='AC', bg=button_color, width=width_button,
height=height_button, command=lambda : borrar())
Button_AC.grid(row=1, column=3, padx=10, pady=10)
Button_Div = Button(root, text='÷', bg=button_color, width=width_button,
height=height_button, command=lambda : click('/'))
Button_Div.grid(row=1, column=4, padx=10, pady=10)
Button_Exp = Button(root, text='EXP', bg=button_color, width=width_button,
height=height_button, command=lambda : click('exp'))
Button_Exp.grid(row=2, column=0, padx=10, pady=10)
Button_7 = Button(root, text='7', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(7))
Button_7.grid(row=2, column=1, padx=10, pady=10)
Button_8 = Button(root, text='8', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(8))
Button_8.grid(row=2, column=2, padx=10, pady=10)
Button_9 = Button(root, text='9', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(9))
Button_9.grid(row=2, column=3, padx=10, pady=10)
Button_Multi = Button(root, text='x', bg=button_color, width=width_button,
height=height_button, command=lambda : click('*'))
Button_Multi.grid(row=2, column=4, padx=10, pady=10)
Button_Raiz = Button(root, text='√', bg=button_color, width=width_button,
height=height_button, command=lambda : click('sqrt'))
Button_Raiz.grid(row=3, column=0, padx=10, pady=10)
Button_4 = Button(root, text='4', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(4))
Button_4.grid(row=3, column=1, padx=10, pady=10)
Button_5 = Button(root, text='5', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(5))
Button_5.grid(row=3, column=2, padx=10, pady=10)
Button_6 = Button(root, text='6', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(6))
Button_6.grid(row=3, column=3, padx=10, pady=10)
Button_Menos = Button(root, text='-', bg=button_color, width=width_button,
height=height_button, command=lambda : click('-'))
Button_Menos.grid(row=3, column=4, padx=10, pady=10)
Button_LN = Button(root, text='LN', bg=button_color, width=width_button,
height=height_button, command=lambda : click('log'))
Button_LN.grid(row=4, column=0, padx=10, pady=10)
Button_1 = Button(root, text='1', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(1))
Button_1.grid(row=4, column=1, padx=10, pady=10)
Button_2 = Button(root, text='2', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(2))
Button_2.grid(row=4, column=2, padx=10, pady=10)
Button_3 = Button(root, text='3', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(3))
Button_3.grid(row=4, column=3, padx=10, pady=10)
Button_Mas = Button(root, text='+', bg=button_color, width=width_button,
height=height_button, command=lambda : click('+'))
Button_Mas.grid(row=4, column=4, padx=10, pady=10)
Button_Point = Button(root, text='.', bg=button_color, width=width_button,
height=height_button, command=lambda : click('.'))
Button_Point.grid(row=5, column=0, padx=10, pady=10)
Button_0 = Button(root, text='0', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(0))
Button_0.grid(row=5, column=1, padx=10, pady=10)
Button_Igual = Button(root, text='=', bg=button_color, width='40', height=
height_button, command=lambda : hacer_operacion())
Button_Igual.grid(row=5, column=2, columnspan=3, padx=10, pady=10)
root.mainloop()
|
from tkinter import *
from math import *
root = Tk()
root.title('Calculadora LE-1409')
root.iconbitmap('calculadora.ico')
root.geometry('510x480')
root.config(bg='gray42')
root.resizable(False, False)
screen = Entry(root, font=('arial', 20, 'bold'), width=22, borderwidth=10,
background='CadetBlue1', justify='right')
screen.grid(row=0, column=0, columnspan=5, padx=20, pady=20)
i = 0
def click(valor):
global i
screen.insert(i, valor)
i += 1
def borrar():
    global i
    screen.delete(0, END)
    i = 0
def hacer_operacion():
    global i
ecuacion = screen.get()
try:
result = eval(ecuacion)
screen.delete(0, END)
screen.insert(0, result)
i = 0
    except Exception:
screen.delete(0, END)
r = screen.insert(0, 'ERROR')
print(r)
button_color = 'gray99'
width_button = 10
height_button = 3
Button_Pi = Button(root, text='π', bg=button_color, width=width_button,
height=height_button, command=lambda : click('pi'))
Button_Pi.grid(row=1, column=0, padx=10, pady=10)
Button_Left = Button(root, text='(', bg=button_color, width=width_button,
height=height_button, command=lambda : click('('))
Button_Left.grid(row=1, column=1, padx=10, pady=10)
Button_Right = Button(root, text=')', bg=button_color, width=width_button,
height=height_button, command=lambda : click(')'))
Button_Right.grid(row=1, column=2, padx=10, pady=10)
Button_AC = Button(root, text='AC', bg=button_color, width=width_button,
height=height_button, command=lambda : borrar())
Button_AC.grid(row=1, column=3, padx=10, pady=10)
Button_Div = Button(root, text='÷', bg=button_color, width=width_button,
height=height_button, command=lambda : click('/'))
Button_Div.grid(row=1, column=4, padx=10, pady=10)
Button_Exp = Button(root, text='EXP', bg=button_color, width=width_button,
height=height_button, command=lambda : click('exp'))
Button_Exp.grid(row=2, column=0, padx=10, pady=10)
Button_7 = Button(root, text='7', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(7))
Button_7.grid(row=2, column=1, padx=10, pady=10)
Button_8 = Button(root, text='8', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(8))
Button_8.grid(row=2, column=2, padx=10, pady=10)
Button_9 = Button(root, text='9', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(9))
Button_9.grid(row=2, column=3, padx=10, pady=10)
Button_Multi = Button(root, text='x', bg=button_color, width=width_button,
height=height_button, command=lambda : click('*'))
Button_Multi.grid(row=2, column=4, padx=10, pady=10)
Button_Raiz = Button(root, text='√', bg=button_color, width=width_button,
height=height_button, command=lambda : click('sqrt'))
Button_Raiz.grid(row=3, column=0, padx=10, pady=10)
Button_4 = Button(root, text='4', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(4))
Button_4.grid(row=3, column=1, padx=10, pady=10)
Button_5 = Button(root, text='5', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(5))
Button_5.grid(row=3, column=2, padx=10, pady=10)
Button_6 = Button(root, text='6', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(6))
Button_6.grid(row=3, column=3, padx=10, pady=10)
Button_Menos = Button(root, text='-', bg=button_color, width=width_button,
height=height_button, command=lambda : click('-'))
Button_Menos.grid(row=3, column=4, padx=10, pady=10)
Button_LN = Button(root, text='LN', bg=button_color, width=width_button,
height=height_button, command=lambda : click('log'))
Button_LN.grid(row=4, column=0, padx=10, pady=10)
Button_1 = Button(root, text='1', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(1))
Button_1.grid(row=4, column=1, padx=10, pady=10)
Button_2 = Button(root, text='2', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(2))
Button_2.grid(row=4, column=2, padx=10, pady=10)
Button_3 = Button(root, text='3', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(3))
Button_3.grid(row=4, column=3, padx=10, pady=10)
Button_Mas = Button(root, text='+', bg=button_color, width=width_button,
height=height_button, command=lambda : click('+'))
Button_Mas.grid(row=4, column=4, padx=10, pady=10)
Button_Point = Button(root, text='.', bg=button_color, width=width_button,
height=height_button, command=lambda : click('.'))
Button_Point.grid(row=5, column=0, padx=10, pady=10)
Button_0 = Button(root, text='0', bg='CadetBlue1', width=width_button,
height=height_button, command=lambda : click(0))
Button_0.grid(row=5, column=1, padx=10, pady=10)
Button_Igual = Button(root, text='=', bg=button_color, width='40', height=
height_button, command=lambda : hacer_operacion())
Button_Igual.grid(row=5, column=2, columnspan=3, padx=10, pady=10)
root.mainloop()
|
from tkinter import *
from math import *
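# Star-importing math makes pi, sqrt, exp and log available to eval() below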
#Root
root=Tk()
root.title('Calculadora LE-1409')
root.iconbitmap('calculadora.ico')
root.geometry('510x480')
root.config(bg='gray42')
root.resizable(False, False)
#Screen
screen=Entry(root, font=("arial",20, "bold"), width=22, borderwidth=10, background="CadetBlue1", justify="right")
screen.grid(row=0, column=0, columnspan=5, padx=20, pady=20)
#Logic
i = 0
def click(valor):
global i
screen.insert(i, valor)
i += 1
def borrar():
    global i
    screen.delete(0, END)
    i = 0
def hacer_operacion():
    global i
ecuacion=screen.get()
try:
result=eval(ecuacion)
screen.delete(0, END)
screen.insert(0, result)
i = 0
    except Exception:
screen.delete(0, END)
r=screen.insert(0, "ERROR")
print(r)
#Buttons
button_color="gray99"
width_button=10
height_button=3
#Row 1
Button_Pi=Button(root, text="π", bg=button_color, width=width_button, height=height_button,
command=lambda:click("pi"))
Button_Pi.grid(row=1, column=0, padx=10, pady=10)
Button_Left=Button(root, text="(", bg=button_color, width=width_button, height=height_button,
command=lambda:click("("))
Button_Left.grid(row=1, column=1, padx=10, pady=10)
Button_Right=Button(root, text=")", bg=button_color, width=width_button, height=height_button,
command=lambda:click(")"))
Button_Right.grid(row=1, column=2, padx=10, pady=10)
Button_AC=Button(root, text="AC", bg=button_color, width=width_button, height=height_button,
command=lambda:borrar())
Button_AC.grid(row=1, column=3, padx=10, pady=10)
Button_Div=Button(root, text="÷", bg=button_color, width=width_button, height=height_button,
command=lambda:click("/"))
Button_Div.grid(row=1, column=4, padx=10, pady=10)
#Row 2
Button_Exp=Button(root, text="EXP", bg=button_color, width=width_button, height=height_button,
command=lambda:click("exp"))
Button_Exp.grid(row=2, column=0, padx=10, pady=10)
Button_7=Button(root, text="7", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(7))
Button_7.grid(row=2, column=1, padx=10, pady=10)
Button_8=Button(root, text="8", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(8))
Button_8.grid(row=2, column=2, padx=10, pady=10)
Button_9=Button(root, text="9", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(9))
Button_9.grid(row=2, column=3, padx=10, pady=10)
Button_Multi=Button(root, text="x", bg=button_color, width=width_button, height=height_button,
command=lambda:click("*"))
Button_Multi.grid(row=2, column=4, padx=10, pady=10)
#Row 3
Button_Raiz=Button(root, text="√", bg=button_color, width=width_button, height=height_button,
command=lambda:click("sqrt"))
Button_Raiz.grid(row=3, column=0, padx=10, pady=10)
Button_4=Button(root, text="4", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(4))
Button_4.grid(row=3, column=1, padx=10, pady=10)
Button_5=Button(root, text="5", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(5))
Button_5.grid(row=3, column=2, padx=10, pady=10)
Button_6=Button(root, text="6", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(6))
Button_6.grid(row=3, column=3, padx=10, pady=10)
Button_Menos=Button(root, text="-", bg=button_color, width=width_button, height=height_button,
command=lambda:click("-"))
Button_Menos.grid(row=3, column=4, padx=10, pady=10)
#Row 4
Button_LN=Button(root, text="LN", bg=button_color, width=width_button, height=height_button,
command=lambda:click("log"))
Button_LN.grid(row=4, column=0, padx=10, pady=10)
Button_1=Button(root, text="1", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(1))
Button_1.grid(row=4, column=1, padx=10, pady=10)
Button_2=Button(root, text="2", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(2))
Button_2.grid(row=4, column=2, padx=10, pady=10)
Button_3=Button(root, text="3", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(3))
Button_3.grid(row=4, column=3, padx=10, pady=10)
Button_Mas=Button(root, text="+", bg=button_color, width=width_button, height=height_button,
command=lambda:click("+"))
Button_Mas.grid(row=4, column=4, padx=10, pady=10)
#Row 5
Button_Point=Button(root, text=".", bg=button_color, width=width_button, height=height_button,
command=lambda:click("."))
Button_Point.grid(row=5, column=0, padx=10, pady=10)
Button_0=Button(root, text="0", bg="CadetBlue1", width=width_button, height=height_button,
command=lambda:click(0))
Button_0.grid(row=5, column=1, padx=10, pady=10)
Button_Igual=Button(root, text="=", bg=button_color, width="40", height=height_button,
command=lambda: hacer_operacion())
Button_Igual.grid(row=5, column=2, columnspan=3, padx=10, pady=10)
root.mainloop()
|
[
2,
4,
5,
6,
7
] |
2,108 |
4545d9756d1f396ead0b0c75d319fb6a718375cd
|
<mask token>
|
<mask token>
for i in range(len(check_list)):
if check_list[i] in sentence:
check = True
idx = sentence.find(check_list[i])
sentence = sentence[idx + 1:]
else:
check = False
break
if check == True:
print('I love UCPC')
else:
print('I hate UCPC')
|
sentence = input()
check_list = ['U', 'C', 'P', 'C']
check = True
for i in range(len(check_list)):
if check_list[i] in sentence:
check = True
idx = sentence.find(check_list[i])
sentence = sentence[idx + 1:]
else:
check = False
break
if check == True:
print('I love UCPC')
else:
print('I hate UCPC')
|
sentence = input()
check_list = ["U", "C", "P", "C"]
check = True
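# Look for "U", "C", "P", "C" in order, shrinking the search window after each hit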
for i in range(len(check_list)):
if check_list[i] in sentence:
check = True
idx = sentence.find(check_list[i])
sentence = sentence[idx+1:]
else:
check = False
break
if check == True:
print("I love UCPC")
else:
print("I hate UCPC")
| null |
[
0,
1,
2,
3
] |
2,109 |
c71e367ad320d7eadabbbfda728d94448db6441d
|
<mask token>
|
<mask token>
if exists(filename):
f = open(filename)
footprint = f.read()
f.close()
headerEndIndex = footprint.find('(pad ')
header = footprint[:headerEndIndex]
lastPadIndex = headerEndIndex
while footprint.find('(pad ', lastPadIndex) > -1:
lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5
footerStartIndex = footprint.find('))', lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find('TE-Connectivity') < 0:
header = """(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ')'
<mask token>
y -= 21.81
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j1[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
<mask token>
for i in range(6):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated
=True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j2 += [newPad]
y -= 2.54
x -= 2.54
<mask token>
for i in range(6):
newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated
=True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),
drill=drillDiameter)
pads_j2 += [newPad]
y -= 2.54
<mask token>
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j3[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
<mask token>
for pad in pads:
newFootprint += str(pad) + '\n'
newFootprint += footer.strip()
print(newFootprint)
<mask token>
f.write(newFootprint)
f.close()
|
x = 0.0
y = 0.0
drillDiameter = 1.0
padWidth = 1.6
<mask token>
filename = 'iCEstick.kicad_mod'
header = ''
footer = ''
if exists(filename):
f = open(filename)
footprint = f.read()
f.close()
headerEndIndex = footprint.find('(pad ')
header = footprint[:headerEndIndex]
lastPadIndex = headerEndIndex
while footprint.find('(pad ', lastPadIndex) > -1:
lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5
footerStartIndex = footprint.find('))', lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find('TE-Connectivity') < 0:
header = """(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ')'
designators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]
designators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',
'88', '90', '91', 'GND', '3V3']]
designators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']
pads_j1 = []
oldX = x
oldY = y
y -= 21.81
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j1[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
pads_j2 = []
x = oldX - 5.8
newY = oldY - 21.81 + 4.49 + 5 * 2.54
y = newY
for i in range(6):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated
=True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j2 += [newPad]
y -= 2.54
x -= 2.54
y = newY
for i in range(6):
newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated
=True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),
drill=drillDiameter)
pads_j2 += [newPad]
y -= 2.54
pads_j3 = []
x = oldX
y = oldY
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j3[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
pads = pads_j1 + pads_j2 + pads_j3
newFootprint = header
for pad in pads:
newFootprint += str(pad) + '\n'
newFootprint += footer.strip()
print(newFootprint)
f = open(filename, 'w')
f.write(newFootprint)
f.close()
|
x = 0.0
y = 0.0
drillDiameter = 1.0
padWidth = 1.6
from os.path import exists
from pad import *
filename = 'iCEstick.kicad_mod'
header = ''
footer = ''
if exists(filename):
f = open(filename)
footprint = f.read()
f.close()
headerEndIndex = footprint.find('(pad ')
header = footprint[:headerEndIndex]
lastPadIndex = headerEndIndex
while footprint.find('(pad ', lastPadIndex) > -1:
lastPadIndex = footprint.find('(pad ', lastPadIndex) + 5
footerStartIndex = footprint.find('))', lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find('TE-Connectivity') < 0:
header = """(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ')'
designators_j1 = ['3V3', 'GND'] + [str(n) for n in range(112, 120)]
designators_j2 = [[str(n) for n in range(78, 82)] + ['GND', '3V3'], ['87',
'88', '90', '91', 'GND', '3V3']]
designators_j3 = ['3V3', 'GND', '62', '61', '60', '56', '48', '47', '45', '44']
pads_j1 = []
oldX = x
oldY = y
y -= 21.81
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j1[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
pads_j2 = []
x = oldX - 5.8
newY = oldY - 21.81 + 4.49 + 5 * 2.54
y = newY
for i in range(6):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j2[0][i], through_hole=True, plated
=True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j2 += [newPad]
y -= 2.54
x -= 2.54
y = newY
for i in range(6):
newPad = Pad(designator=designators_j2[1][i], through_hole=True, plated
=True, shape=Shape.CIRCLE, at=(x, y), size=(padWidth, padWidth),
drill=drillDiameter)
pads_j2 += [newPad]
y -= 2.54
pads_j3 = []
x = oldX
y = oldY
for i in range(10):
if i == 0:
shape = Shape.RECT
else:
shape = Shape.CIRCLE
newPad = Pad(designator=designators_j3[i], through_hole=True, plated=
True, shape=shape, at=(x, y), size=(padWidth, padWidth), drill=
drillDiameter)
pads_j1 += [newPad]
x -= 2.54
pads = pads_j1 + pads_j2 + pads_j3
newFootprint = header
for pad in pads:
newFootprint += str(pad) + '\n'
newFootprint += footer.strip()
print(newFootprint)
f = open(filename, 'w')
f.write(newFootprint)
f.close()
|
#!/usr/bin/python
# Point of origin (connector J3, pad 1, net 3V3)
x = 0.0
y = 0.0
drillDiameter = 1.0
padWidth = 1.6
from os.path import exists
from pad import *
filename="iCEstick.kicad_mod"
header = ""
footer = ""
if exists(filename):
# Read existing footprint
f = open(filename)
footprint = f.read()
f.close()
# Find the end of the header
headerEndIndex = footprint.find("(pad ")
header = footprint[:headerEndIndex]
# Find the end of the pads list
lastPadIndex = headerEndIndex
while (footprint.find("(pad ", lastPadIndex) > -1):
lastPadIndex = footprint.find("(pad ", lastPadIndex) + 5
footerStartIndex = footprint.find("))", lastPadIndex) + 2
footer = footprint[footerStartIndex:]
if header.find("TE-Connectivity") < 0:
header = \
"""(module iCEstick (layer F.Cu) (tedit 5BD73D6F)
(fp_text reference REF** (at 0 -12.7) (layer F.SilkS)
(effects (font (size 1 1) (thickness 0.15)))
)
(fp_text value iCEstick (at 0 25.4) (layer F.Fab)
(effects (font (size 1 1) (thickness 0.15)))
)
"""
footer = ")"
#
# Generate pads according to schematic drawing
#
designators_j1 = ["3V3", "GND"] + [str(n) for n in range(112,120)]
designators_j2 = [ \
[str(n) for n in range(78,82)] + ["GND", "3V3"], \
["87", "88", "90", "91", "GND", "3V3"] \
]
designators_j3 = ["3V3", "GND", "62", "61", "60", "56", "48", "47", "45", "44"]
#
# J1 connector pad list
#
pads_j1 = []
oldX = x
oldY = y
y -= 21.81
for i in range(10):
# The first pad is a rectangle, the remaining ones are circular
if (i == 0):
shape = Shape.RECT
else:
shape = Shape.CIRCLE
# Create pad object
newPad = Pad(
designator = designators_j1[i],
through_hole = True,
plated = True,
shape = shape,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
pads_j1 += [newPad]
x -= 2.54
#
# J2 connector pad list
#
pads_j2 = []
x = oldX - 5.80
newY = oldY - 21.81 + 4.49 + 5*2.54
y = newY
for i in range(6):
# The first pad is a rectangle, the remaining ones are circular
if (i == 0):
shape = Shape.RECT
else:
shape = Shape.CIRCLE
# Create pad object
newPad = Pad(
designator = designators_j2[0][i],
through_hole = True,
plated = True,
shape = shape,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
pads_j2 += [newPad]
y -= 2.54
# Second (inner) row of pins of J2
x -= 2.54
y = newY
for i in range(6):
# Create pad object
newPad = Pad(
designator = designators_j2[1][i],
through_hole = True,
plated = True,
shape = Shape.CIRCLE,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
pads_j2 += [newPad]
y -= 2.54
#
# J3 connector pad list
#
pads_j3 = []
x = oldX
y = oldY
for i in range(10):
# The first pad is a rectangle, the remaining ones are circular
if (i == 0):
shape = Shape.RECT
else:
shape = Shape.CIRCLE
# Create pad object
newPad = Pad(
designator = designators_j3[i],
through_hole = True,
plated = True,
shape = shape,
at = (x, y),
size = (padWidth, padWidth),
drill = drillDiameter
)
pads_j1 += [newPad]
x -= 2.54
# Make a list of all pads
pads = pads_j1 + pads_j2 + pads_j3
# Compose new footprint from header, pads and footer
newFootprint = header
for pad in pads:
newFootprint += str(pad) + "\n"
newFootprint += footer.strip()
# Print generated footprint to screen
print(newFootprint)
# Save generated footprint to file
f = open(filename, "w")
f.write(newFootprint)
f.close()
|
[
0,
1,
2,
3,
4
] |
2,110 |
463f50567c9dd4b7b47a84eea715541cec5d3cb5
|
<mask token>
|
<mask token>
class IndexPage:
def login(self, username, password):
BasePage.open_url(self, self.base_url)
BasePage.send_key(self, 'css', '#username', username)
BasePage.send_key(self, 'css', '#password', password)
BasePage.click_element(self, 'css', '.ant-btn')
<mask token>
|
<mask token>
sys.path.append('../')
<mask token>
class IndexPage:
def login(self, username, password):
BasePage.open_url(self, self.base_url)
BasePage.send_key(self, 'css', '#username', username)
BasePage.send_key(self, 'css', '#password', password)
BasePage.click_element(self, 'css', '.ant-btn')
if __name__ == '__main__':
login_cookies(self)
|
from time import sleep
import sys
sys.path.append('../')
from common.encapsulation import BasePage
class IndexPage:
def login(self, username, password):
BasePage.open_url(self, self.base_url)
BasePage.send_key(self, 'css', '#username', username)
BasePage.send_key(self, 'css', '#password', password)
BasePage.click_element(self, 'css', '.ant-btn')
if __name__ == '__main__':
login_cookies(self)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Dang Kai
# @Date: 2018-10-30 15:52:57
# @Last Modified time: 2018-11-10 09:09:21
# @E-mail: [email protected]
# @Description:
from time import sleep
import sys
sys.path.append('../')
from common.encapsulation import BasePage
class IndexPage:
def login(self, username, password):
        # Login page
BasePage.open_url(self,self.base_url)
BasePage.send_key(self,'css','#username',username)
BasePage.send_key(self,'css',"#password",password)
BasePage.click_element(self,"css",".ant-btn")
if __name__ == '__main__':
login_cookies(self)
|
[
0,
2,
3,
4,
5
] |
2,111 |
7ee5779625d53ff1e18f73b20ba5849666f89b55
|
<mask token>
class RandomTest:
def __init__(self, seed=42, deterministic=False):
self.seed = seed
self.deterministic = deterministic
@fixed_random
def test_function(self, i):
return [random.randint(0, 10) for x in range(10)]
<mask token>
|
<mask token>
def fixed_random(func):
"""Create the data"""
def _func(self, i):
state = random.getstate()
if self.deterministic or self.seed is not None:
random.seed(self.seed + i)
results = func(self, i)
random.setstate(state)
else:
results = func(self, i)
return results
return _func
class RandomTest:
def __init__(self, seed=42, deterministic=False):
self.seed = seed
self.deterministic = deterministic
@fixed_random
def test_function(self, i):
return [random.randint(0, 10) for x in range(10)]
<mask token>
|
<mask token>
def fixed_random(func):
"""Create the data"""
def _func(self, i):
state = random.getstate()
if self.deterministic or self.seed is not None:
random.seed(self.seed + i)
results = func(self, i)
random.setstate(state)
else:
results = func(self, i)
return results
return _func
class RandomTest:
def __init__(self, seed=42, deterministic=False):
self.seed = seed
self.deterministic = deterministic
@fixed_random
def test_function(self, i):
return [random.randint(0, 10) for x in range(10)]
rt = RandomTest(0)
print(rt.test_function(0))
print(rt.test_function(0))
rt.seed = 1
print(rt.test_function(0))
print(rt.test_function(0))
|
<mask token>
import random
def fixed_random(func):
"""Create the data"""
def _func(self, i):
state = random.getstate()
if self.deterministic or self.seed is not None:
random.seed(self.seed + i)
results = func(self, i)
random.setstate(state)
else:
results = func(self, i)
return results
return _func
class RandomTest:
def __init__(self, seed=42, deterministic=False):
self.seed = seed
self.deterministic = deterministic
@fixed_random
def test_function(self, i):
return [random.randint(0, 10) for x in range(10)]
rt = RandomTest(0)
print(rt.test_function(0))
print(rt.test_function(0))
rt.seed = 1
print(rt.test_function(0))
print(rt.test_function(0))
|
#!/usr/bin/env python3
"""Demonstrates how to do deterministic task generation using l2l"""
import random
def fixed_random(func):
"""Create the data"""
def _func(self, i):
state = random.getstate()
if self.deterministic or self.seed is not None:
random.seed(self.seed + i)
results = func(self, i)
random.setstate(state)
else:
results = func(self, i)
return results
return _func
class RandomTest:
def __init__(self, seed=42, deterministic=False):
self.seed = seed
self.deterministic = deterministic
@fixed_random
def test_function(self, i):
return [random.randint(0, 10) for x in range(10)]
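# Demo: with the same seed the draws repeat; changing the seed changes them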
rt = RandomTest(0)
print(rt.test_function(0))
print(rt.test_function(0))
rt.seed = 1
print(rt.test_function(0))
print(rt.test_function(0))
|
[
3,
4,
6,
7,
8
] |
2,112 |
270dba92af583e37c35ed5365f764adfdc2f947d
|
<mask token>
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
<mask token>
|
<mask token>
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open('data/topojson/topojson3.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
ds = None
|
<mask token>
def test_ogr_toposjon_objects_is_array():
ds = ogr.Open('data/topojson/topojson1.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
expected_results = [('foo', None, 'POINT EMPTY'), (None, None,
'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,
'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',
'LINESTRING EMPTY'), ('1', None,
'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,
'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,
'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,
'POLYGON EMPTY'), (None, None,
'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
), (None, None,
'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'
), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),
(None, None, 'MULTIPOLYGON EMPTY'), (None, None,
'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,
None,
'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'
), (None, None, 'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),
(None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
)]
assert lyr.GetFeatureCount() == len(expected_results)
for i, exp_result in enumerate(expected_results):
feat = lyr.GetNextFeature()
if feat.GetField('id') != exp_result[0] or feat.GetField('name'
) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(
) != exp_result[2]:
feat.DumpReadable()
print(exp_result)
print(feat.GetField('name'))
pytest.fail('failure at feat index %d' % i)
ds = None
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open('data/topojson/topojson3.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
ds = None
|
import ogrtest
import pytest
from osgeo import ogr
def test_ogr_toposjon_objects_is_array():
ds = ogr.Open('data/topojson/topojson1.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
expected_results = [('foo', None, 'POINT EMPTY'), (None, None,
'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,
'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',
'LINESTRING EMPTY'), ('1', None,
'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,
'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,
'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,
'POLYGON EMPTY'), (None, None,
'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
), (None, None,
'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'
), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),
(None, None, 'MULTIPOLYGON EMPTY'), (None, None,
'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,
None,
'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'
), (None, None, 'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),
(None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
)]
assert lyr.GetFeatureCount() == len(expected_results)
for i, exp_result in enumerate(expected_results):
feat = lyr.GetNextFeature()
if feat.GetField('id') != exp_result[0] or feat.GetField('name'
) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(
) != exp_result[2]:
feat.DumpReadable()
print(exp_result)
print(feat.GetField('name'))
pytest.fail('failure at feat index %d' % i)
ds = None
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open('data/topojson/topojson3.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
ds = None
|
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose:  TopoJSON driver test suite.
# Author: Even Rouault
#
###############################################################################
# Copyright (c) 2020, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import ogrtest
import pytest
from osgeo import ogr
###############################################################################
# Test TopoJSON
def test_ogr_toposjon_objects_is_array():
ds = ogr.Open("data/topojson/topojson1.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
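    # Each expected tuple below is (id field, name field, geometry as WKT),
    # compared against the features in the same order.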
expected_results = [
("foo", None, "POINT EMPTY"),
(None, None, "POINT EMPTY"),
(None, None, "POINT EMPTY"),
(None, None, "POINT (100 1010)"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, "0", "LINESTRING EMPTY"),
(None, "foo", "LINESTRING EMPTY"),
("1", None, "LINESTRING (100 1000,110 1000,110 1100)"),
("2", None, "LINESTRING (110 1100,110 1000,100 1000)"),
(None, None, "POLYGON EMPTY"),
(None, None, "POLYGON EMPTY"),
(None, None, "POLYGON EMPTY"),
(
None,
None,
"POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))",
),
(
None,
None,
"POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))",
),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT (100 1010,101 1020)"),
(None, None, "MULTIPOLYGON EMPTY"),
(None, None, "MULTIPOLYGON EMPTY"),
(None, None, "MULTIPOLYGON EMPTY"),
(
None,
None,
"MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))",
),
(None, None, "MULTILINESTRING EMPTY"),
(None, None, "MULTILINESTRING EMPTY"),
(None, None, "MULTILINESTRING ((100 1000,110 1000,110 1100))"),
(
None,
None,
"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))",
),
(
None,
None,
"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))",
),
]
assert lyr.GetFeatureCount() == len(expected_results)
for i, exp_result in enumerate(expected_results):
feat = lyr.GetNextFeature()
if (
feat.GetField("id") != exp_result[0]
or feat.GetField("name") != exp_result[1]
or feat.GetGeometryRef().ExportToWkt() != exp_result[2]
):
feat.DumpReadable()
print(exp_result)
print(feat.GetField("name"))
pytest.fail("failure at feat index %d" % i)
ds = None
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open("data/topojson/topojson2.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
feat = lyr.GetNextFeature()
assert feat["id"] == "foo"
assert feat["name"] == "line"
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open("data/topojson/topojson3.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")
ds = None
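# Note on the last test: topojson3.topojson has no "transform" member, so arc
# coordinates are read as absolute positions rather than the quantized,
# delta-encoded integers that a TopoJSON transform (scale/translate) implies.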
|
[
1,
2,
3,
4,
5
] |
2,113 |
6c641ace8f1e5e8c42fa776bd7604daf243f9a41
|
<mask token>
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
<mask token>
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
|
<mask token>
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \\in [0, 1]
"""
return F.normalize(x, stats['mean'], stats['std'])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
|
<mask token>
IGNORE_LABEL = 255
STATS = {'vit': {'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5)}, 'deit':
{'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}}
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \\in [0, 1]
"""
return F.normalize(x, stats['mean'], stats['std'])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
|
import torch
import torchvision.transforms.functional as F
import numpy as np
import yaml
from pathlib import Path
IGNORE_LABEL = 255
STATS = {'vit': {'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5)}, 'deit':
{'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225)}}
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, 'r'), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat['name'])
if 'color' in cat:
colors[cat['id']] = torch.tensor(cat['color']).float() / 255
else:
colors[cat['id']] = torch.tensor(cmap[cat['id']]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \\in [0, 1]
"""
return F.normalize(x, stats['mean'], stats['std'])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \\in [-1, 1]
"""
mean = torch.tensor(stats['mean'])
std = torch.tensor(stats['std'])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
|
import torch
import torchvision.transforms.functional as F
import numpy as np
import yaml
from pathlib import Path
IGNORE_LABEL = 255
STATS = {
"vit": {"mean": (0.5, 0.5, 0.5), "std": (0.5, 0.5, 0.5)},
"deit": {"mean": (0.485, 0.456, 0.406), "std": (0.229, 0.224, 0.225)},
}
def seg_to_rgb(seg, colors):
im = torch.zeros((seg.shape[0], seg.shape[1], seg.shape[2], 3)).float()
cls = torch.unique(seg)
for cl in cls:
color = colors[int(cl)]
if len(color.shape) > 1:
color = color[0]
im[seg == cl] = color
return im
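# seg_to_rgb above expects seg as an N x H x W tensor of integer class labels
# and returns an N x H x W x 3 float RGB image (channels-last).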
def dataset_cat_description(path, cmap=None):
desc = yaml.load(open(path, "r"), Loader=yaml.FullLoader)
colors = {}
names = []
for i, cat in enumerate(desc):
names.append(cat["name"])
if "color" in cat:
colors[cat["id"]] = torch.tensor(cat["color"]).float() / 255
else:
colors[cat["id"]] = torch.tensor(cmap[cat["id"]]).float()
colors[IGNORE_LABEL] = torch.tensor([0.0, 0.0, 0.0]).float()
return names, colors
def rgb_normalize(x, stats):
"""
x : C x *
x \in [0, 1]
"""
return F.normalize(x, stats["mean"], stats["std"])
def rgb_denormalize(x, stats):
"""
x : N x C x *
x \in [-1, 1]
"""
mean = torch.tensor(stats["mean"])
std = torch.tensor(stats["std"])
for i in range(3):
x[:, i, :, :] = x[:, i, :, :] * std[i] + mean[i]
return x
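# Minimal round-trip sketch (illustrative only; the input tensor is made up):
#   x = torch.rand(3, 224, 224)                   # C x H x W in [0, 1]
#   x_norm = rgb_normalize(x, STATS["vit"])       # standardize per channel
#   x_back = rgb_denormalize(x_norm.unsqueeze(0).clone(), STATS["vit"])
# Note that rgb_denormalize modifies its argument in place, hence the clone().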
|
[
2,
4,
5,
6,
7
] |
2,114 |
d373d283a622262e2da974549907bdd8f61e89ec
|
<mask token>
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'
def get_profile_img():
os.chdir('static\\img\\profile_img')
if os.access(f'{current_user.id}.jpg', os.F_OK):
filename = str(current_user.id)
elif current_user.gender[0] == 'М':
filename = 'profilem'
else:
filename = 'profilef'
os.chdir('..\\..\\..')
return filename
def find_products(tag):
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
sessions.commit()
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
ans_products = list()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
title = item.title.lower()
if tag in title or title in tag:
ans_products.append(item)
return ans_products
@app.errorhandler(404)
def not_found(error):
return render_template('404.html', error=error)
@login_manager.user_loader
def load_user(user_id):
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
return session_in_db.query(users.User).get(user_id)
class LoginForm(FlaskForm):
email = EmailField('Почта', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
remember_me = BooleanField('Запомнить меня')
submit = SubmitField('Войти')
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
if request.method == 'POST':
session['tag'] = request.form['search']
return redirect('/')
all_product = find_products(session.get('tag', '').lower())
if session.get('reverse', False):
sim = '▲'
else:
sim = '▼'
simp = simc = simn = simnal = ''
pos = session.get('sort', 'none')
if pos == 'price':
all_product.sort(key=lambda x: x.price, reverse=session.get(
'reverse', False))
simp = sim
elif pos == 'nal':
all_product.sort(key=lambda x: x.existence, reverse=session.get(
'reverse', False))
simnal = sim
elif pos == 'count':
all_product.sort(key=lambda x: x.still_have, reverse=session.get(
'reverse', False))
simc = sim
elif pos == 'name':
simn = sim
all_product.sort(key=lambda x: x.title, reverse=session.get(
'reverse', False))
else:
shuffle(all_product)
return render_template('index.html', basket_count=session.get(
'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),
size=len(all_product), filename=filename, product=all_product, simc
=simc, simn=simn, simp=simp, simnal=simnal)
<mask token>
@app.route('/logout')
@login_required
def logout():
session['tag'] = ''
logout_user()
return redirect('/')
class RegisterForm(FlaskForm):
email = EmailField('Email', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
password_again = PasswordField('Повторите пароль', validators=[
DataRequired()])
surname = StringField('Фамилия', validators=[DataRequired()])
name = StringField('Имя', validators=[DataRequired()])
mname = StringField('Отчество(при наличии)', validators=[DataRequired()])
gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',
'М'), ('2', 'Ж')])
age = StringField('Возраст', validators=[DataRequired()])
submit = SubmitField('Подтвердить')
class LengthError(Exception):
error = 'Пароль должен состоять не менее чем из 8 символов!'
class SymbolError(Exception):
error = 'В пароле должен быть хотя бы один символ!'
class LetterError(Exception):
error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'
class DigitError(Exception):
error = 'В пароле должна быть хотя бы одна цифра!'
def bool_ys(password):
ys = [0, 0, 0, 0]
for i in password:
if i.isdigit():
ys[0] = 1
elif i.isalpha():
if i.isupper():
ys[1] = 1
else:
ys[2] = 1
else:
ys[3] = 1
if ys[2] * ys[1] == 0:
raise LetterError
if ys[0] == 0:
raise DigitError
if ys[3] == 0:
raise SymbolError
return 'ok'
def check_password(password):
try:
        if len(password) < 8:
raise LengthError
bool_ys(password)
return 'OK'
except (LengthError, SymbolError, LetterError, DigitError) as ex:
return ex.error
<mask token>
@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])
def delete(product_id, count):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
prod.still_have += count
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
bask = list(filter(lambda x: x[0] != product_id, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
sessions.commit()
return redirect('/basket')
@app.route('/redact_profile', methods=['GET', 'POST'])
@login_required
def redact_profile():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
form = RegisterForm()
if request.method == 'GET':
if user.gender == 'Мужской':
gen = '1'
else:
gen = '2'
form.gender.data = gen
form.name.data = user.name
form.mname.data = user.midname
form.age.data = user.age
form.surname.data = user.surname
elif request.method == 'POST':
if form.gender.data == '1':
gen = 'Мужской'
else:
gen = 'Женский'
user.gender = gen
user.name = form.name.data
user.midname = form.mname.data
user.age = form.age.data
user.surname = form.surname.data
session_in_db.commit()
return redirect('/profile')
filename = get_profile_img()
return render_template('redact_profile.html', form=form, filename=
filename, basket_count=session.get('basket_count', 0), title=
'Редактирование')
class Buy(FlaskForm):
    count = IntegerField('Количество:', validators=[DataRequired(),
NumberRange(1)], default=1)
submit = SubmitField('В корзину')
<mask token>
@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_minus(product_id):
sessions = db_session.create_session()
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] -= 1
bask = list(filter(lambda x: x[1] > 0, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod = sessions.query(products.Products).get(product_id)
prod.still_have += 1
sessions.commit()
return redirect('/basket')
<mask token>
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Старый пароль', validators=[DataRequired()])
new_password = PasswordField('Новый пароль', validators=[DataRequired()])
again_password = PasswordField('Повторите новый пароль', validators=[
DataRequired()])
submit = SubmitField('Сменить пароль')
@app.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
filename = get_profile_img()
form = ChangePasswordForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
if user.hashed_password != form.old_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='Неверный пароль',
again_password_error='OK', new_password_error='OK',
filename=filename)
result = check_password(form.new_password.data)
if user.hashed_password == form.new_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=
                'Новый пароль не должен совпадать со старым!', filename=filename
)
if result != 'OK':
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=result, filename=filename)
if form.new_password.data != form.again_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', new_password_error='OK',
again_password_error='Пароли не совпадают!', filename=filename)
user.hashed_password = form.new_password.data
session_in_db.commit()
return redirect('/profile')
return render_template('change_password.html', form=form, basket_count=
session.get('basket_count', 0), title='Сменить пароль', filename=
filename, old_password_error='OK', again_password_error='OK',
new_password_error='OK')
def main():
db_session.global_init('db/blogs.sqlite')
api.add_resource(product_resource.ProductListResource, '/api/v2/products')
api.add_resource(product_resource.ProductResource,
'/api/v2/products/<int:product_id>')
app.run()
<mask token>
|
<mask token>
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'
def get_profile_img():
os.chdir('static\\img\\profile_img')
if os.access(f'{current_user.id}.jpg', os.F_OK):
filename = str(current_user.id)
elif current_user.gender[0] == 'М':
filename = 'profilem'
else:
filename = 'profilef'
os.chdir('..\\..\\..')
return filename
def find_products(tag):
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
sessions.commit()
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
ans_products = list()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
title = item.title.lower()
if tag in title or title in tag:
ans_products.append(item)
return ans_products
@app.errorhandler(404)
def not_found(error):
return render_template('404.html', error=error)
@login_manager.user_loader
def load_user(user_id):
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
return session_in_db.query(users.User).get(user_id)
class LoginForm(FlaskForm):
email = EmailField('Почта', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
remember_me = BooleanField('Запомнить меня')
submit = SubmitField('Войти')
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
if request.method == 'POST':
session['tag'] = request.form['search']
return redirect('/')
all_product = find_products(session.get('tag', '').lower())
if session.get('reverse', False):
sim = '▲'
else:
sim = '▼'
simp = simc = simn = simnal = ''
pos = session.get('sort', 'none')
if pos == 'price':
all_product.sort(key=lambda x: x.price, reverse=session.get(
'reverse', False))
simp = sim
elif pos == 'nal':
all_product.sort(key=lambda x: x.existence, reverse=session.get(
'reverse', False))
simnal = sim
elif pos == 'count':
all_product.sort(key=lambda x: x.still_have, reverse=session.get(
'reverse', False))
simc = sim
elif pos == 'name':
simn = sim
all_product.sort(key=lambda x: x.title, reverse=session.get(
'reverse', False))
else:
shuffle(all_product)
return render_template('index.html', basket_count=session.get(
'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),
size=len(all_product), filename=filename, product=all_product, simc
=simc, simn=simn, simp=simp, simnal=simnal)
@app.route('/login', methods=['GET', 'POST'])
def login():
session['tag'] = ''
form = LoginForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).filter(users.User.email ==
form.email.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
bask = list(map(lambda x: [session_in_db.query(products.
Products).get(x[0]), x[1]], bask))
session['basket_count'] = len(bask)
return redirect('/')
return render_template('login_form.html', message=
'Неправильный логин или пароль', form=form)
return render_template('login_form.html', basket_count=session.get(
'basket_count', 0), title='Авторизация', form=form, filename='profilem'
)
@app.route('/logout')
@login_required
def logout():
session['tag'] = ''
logout_user()
return redirect('/')
class RegisterForm(FlaskForm):
email = EmailField('Email', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
password_again = PasswordField('Повторите пароль', validators=[
DataRequired()])
surname = StringField('Фамилия', validators=[DataRequired()])
name = StringField('Имя', validators=[DataRequired()])
mname = StringField('Отчество(при наличии)', validators=[DataRequired()])
gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',
'М'), ('2', 'Ж')])
age = StringField('Возраст', validators=[DataRequired()])
submit = SubmitField('Подтвердить')
class LengthError(Exception):
error = 'Пароль должен состоять не менее чем из 8 символов!'
class SymbolError(Exception):
error = 'В пароле должен быть хотя бы один символ!'
class LetterError(Exception):
error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'
class DigitError(Exception):
error = 'В пароле должна быть хотя бы одна цифра!'
def bool_ys(password):
ys = [0, 0, 0, 0]
for i in password:
if i.isdigit():
ys[0] = 1
elif i.isalpha():
if i.isupper():
ys[1] = 1
else:
ys[2] = 1
else:
ys[3] = 1
if ys[2] * ys[1] == 0:
raise LetterError
if ys[0] == 0:
raise DigitError
if ys[3] == 0:
raise SymbolError
return 'ok'
def check_password(password):
try:
        if len(password) < 8:
raise LengthError
bool_ys(password)
return 'OK'
except (LengthError, SymbolError, LetterError, DigitError) as ex:
return ex.error
@app.route('/register', methods=['GET', 'POST'])
def reqister():
form = RegisterForm()
if form.validate_on_submit():
result = check_password(form.password.data)
if result != 'OK':
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
email_error='OK', again_password_error='OK', password_error
=result)
if form.password.data != form.password_again.data:
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
email_error='OK', password_error='OK', again_password_error
='Пароли не совпадают')
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
if session_in_db.query(users.User).filter(users.User.email == form.
email.data).first():
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
password_error='OK', again_password_error='OK', email_error
='Такой пользователь уже есть')
if form.gender.data == '1':
gen = 'Мужской'
else:
gen = 'Женский'
user = users.User(name=form.name.data, midname=form.mname.data,
gender=gen, email=form.email.data, surname=form.surname.data,
age=form.age.data, hashed_password=form.password.data)
session_in_db.add(user)
session_in_db.commit()
return redirect('/login')
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form, filename=
'profilem', email_error='OK', password_error='OK',
again_password_error='OK')
@app.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
if request.method == 'GET':
filename = get_profile_img()
params = {'title': 'Профиль', 'filename': filename, 'id':
current_user.id, 'name': current_user.name, 'sname':
current_user.surname, 'mname': current_user.midname, 'gender':
current_user.gender, 'age': current_user.age, 'basket_count':
session.get('basket_count', 0)}
return render_template('profile.html', **params)
elif request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOAD_FOLDER'],
f'{current_user.id}.jpg'))
return redirect('/profile')
<mask token>
@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])
def delete(product_id, count):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
prod.still_have += count
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
bask = list(filter(lambda x: x[0] != product_id, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
sessions.commit()
return redirect('/basket')
@app.route('/redact_profile', methods=['GET', 'POST'])
@login_required
def redact_profile():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
form = RegisterForm()
if request.method == 'GET':
if user.gender == 'Мужской':
gen = '1'
else:
gen = '2'
form.gender.data = gen
form.name.data = user.name
form.mname.data = user.midname
form.age.data = user.age
form.surname.data = user.surname
elif request.method == 'POST':
if form.gender.data == '1':
gen = 'Мужской'
else:
gen = 'Женский'
user.gender = gen
user.name = form.name.data
user.midname = form.mname.data
user.age = form.age.data
user.surname = form.surname.data
session_in_db.commit()
return redirect('/profile')
filename = get_profile_img()
return render_template('redact_profile.html', form=form, filename=
filename, basket_count=session.get('basket_count', 0), title=
'Редактирование')
class Buy(FlaskForm):
    count = IntegerField('Количество:', validators=[DataRequired(),
NumberRange(1)], default=1)
submit = SubmitField('В корзину')
@app.route('/product/<int:product_id>', methods=['GET', 'POST'])
def product(product_id):
form = Buy()
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if form.validate_on_submit():
if current_user.is_authenticated:
if sessions.query(products.Products).get(product_id
).existence and form.count.data <= prod.still_have:
prod.still_have -= form.count.data
if prod.still_have == 0:
prod.existence = 0
user = sessions.query(users.User).get(current_user.id)
if user.basket:
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for
x in user.basket.strip().split()]
change_product = False
for item in bask:
if item[0] == product_id:
item[1] += form.count.data
change_product = True
if not change_product:
user.basket = (user.basket +
f'{product_id}-{form.count.data} ')
else:
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for
x in bask])
bask += ' '
user.basket = bask
else:
user.basket = f'{product_id}-{form.count.data} '
sessions.commit()
else:
return render_template('product.html', prod=prod, filename=
filename, title=prod.title, form=form, basket_count=
session.get('basket_count', 0), message=
                    'Товара в таком количестве нет в наличии!')
else:
return render_template('product.html', prod=prod, filename=
filename, basket_count=session.get('basket_count', 0),
title=prod.title, form=form, message='Вы не авторизованы')
return redirect('/basket')
return render_template('product.html', prod=prod, filename=filename,
basket_count=session.get('basket_count', 0), title=prod.title, form
=form)
<mask token>
@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_minus(product_id):
sessions = db_session.create_session()
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] -= 1
bask = list(filter(lambda x: x[1] > 0, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod = sessions.query(products.Products).get(product_id)
prod.still_have += 1
sessions.commit()
return redirect('/basket')
@app.route('/change/<string:pos>')
def change(pos):
last_pos = session.get('sort', 'none')
if last_pos == pos:
session['reverse'] = not session.get('reverse', False)
else:
session['reverse'] = False
session['sort'] = pos
return redirect('/')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Старый пароль', validators=[DataRequired()])
new_password = PasswordField('Новый пароль', validators=[DataRequired()])
again_password = PasswordField('Повторите новый пароль', validators=[
DataRequired()])
submit = SubmitField('Сменить пароль')
@app.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
filename = get_profile_img()
form = ChangePasswordForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
if user.hashed_password != form.old_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='Неверный пароль',
again_password_error='OK', new_password_error='OK',
filename=filename)
result = check_password(form.new_password.data)
if user.hashed_password == form.new_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=
                'Новый пароль не должен совпадать со старым!', filename=filename
)
if result != 'OK':
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=result, filename=filename)
if form.new_password.data != form.again_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', new_password_error='OK',
again_password_error='Пароли не совпадают!', filename=filename)
user.hashed_password = form.new_password.data
session_in_db.commit()
return redirect('/profile')
return render_template('change_password.html', form=form, basket_count=
session.get('basket_count', 0), title='Сменить пароль', filename=
filename, old_password_error='OK', again_password_error='OK',
new_password_error='OK')
def main():
db_session.global_init('db/blogs.sqlite')
api.add_resource(product_resource.ProductListResource, '/api/v2/products')
api.add_resource(product_resource.ProductResource,
'/api/v2/products/<int:product_id>')
app.run()
<mask token>
|
<mask token>
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'
def get_profile_img():
os.chdir('static\\img\\profile_img')
if os.access(f'{current_user.id}.jpg', os.F_OK):
filename = str(current_user.id)
elif current_user.gender[0] == 'М':
filename = 'profilem'
else:
filename = 'profilef'
os.chdir('..\\..\\..')
return filename
def find_products(tag):
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
sessions.commit()
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
ans_products = list()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
title = item.title.lower()
if tag in title or title in tag:
ans_products.append(item)
return ans_products
@app.errorhandler(404)
def not_found(error):
return render_template('404.html', error=error)
@login_manager.user_loader
def load_user(user_id):
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
return session_in_db.query(users.User).get(user_id)
class LoginForm(FlaskForm):
email = EmailField('Почта', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
remember_me = BooleanField('Запомнить меня')
submit = SubmitField('Войти')
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
if request.method == 'POST':
session['tag'] = request.form['search']
return redirect('/')
all_product = find_products(session.get('tag', '').lower())
if session.get('reverse', False):
sim = '▲'
else:
sim = '▼'
simp = simc = simn = simnal = ''
pos = session.get('sort', 'none')
if pos == 'price':
all_product.sort(key=lambda x: x.price, reverse=session.get(
'reverse', False))
simp = sim
elif pos == 'nal':
all_product.sort(key=lambda x: x.existence, reverse=session.get(
'reverse', False))
simnal = sim
elif pos == 'count':
all_product.sort(key=lambda x: x.still_have, reverse=session.get(
'reverse', False))
simc = sim
elif pos == 'name':
simn = sim
all_product.sort(key=lambda x: x.title, reverse=session.get(
'reverse', False))
else:
shuffle(all_product)
return render_template('index.html', basket_count=session.get(
'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),
size=len(all_product), filename=filename, product=all_product, simc
=simc, simn=simn, simp=simp, simnal=simnal)
@app.route('/login', methods=['GET', 'POST'])
def login():
session['tag'] = ''
form = LoginForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).filter(users.User.email ==
form.email.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
bask = list(map(lambda x: [session_in_db.query(products.
Products).get(x[0]), x[1]], bask))
session['basket_count'] = len(bask)
return redirect('/')
return render_template('login_form.html', message=
'Неправильный логин или пароль', form=form)
return render_template('login_form.html', basket_count=session.get(
'basket_count', 0), title='Авторизация', form=form, filename='profilem'
)
@app.route('/logout')
@login_required
def logout():
session['tag'] = ''
logout_user()
return redirect('/')
class RegisterForm(FlaskForm):
email = EmailField('Email', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
password_again = PasswordField('Повторите пароль', validators=[
DataRequired()])
surname = StringField('Фамилия', validators=[DataRequired()])
name = StringField('Имя', validators=[DataRequired()])
mname = StringField('Отчество(при наличии)', validators=[DataRequired()])
gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',
'М'), ('2', 'Ж')])
age = StringField('Возраст', validators=[DataRequired()])
submit = SubmitField('Подтвердить')
class LengthError(Exception):
error = 'Пароль должен состоять не менее чем из 8 символов!'
class SymbolError(Exception):
error = 'В пароле должен быть хотя бы один символ!'
class LetterError(Exception):
error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'
class DigitError(Exception):
error = 'В пароле должна быть хотя бы одна цифра!'
def bool_ys(password):
ys = [0, 0, 0, 0]
for i in password:
if i.isdigit():
ys[0] = 1
elif i.isalpha():
if i.isupper():
ys[1] = 1
else:
ys[2] = 1
else:
ys[3] = 1
if ys[2] * ys[1] == 0:
raise LetterError
if ys[0] == 0:
raise DigitError
if ys[3] == 0:
raise SymbolError
return 'ok'
def check_password(password):
try:
        if len(password) < 8:
raise LengthError
bool_ys(password)
return 'OK'
except (LengthError, SymbolError, LetterError, DigitError) as ex:
return ex.error
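# Illustrative results (hypothetical inputs, not from the original code):
#   check_password('Pass1!')      -> length error (password is too short)
#   check_password('LongPass1!')  -> 'OK' (digit, upper, lower and symbol present)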
@app.route('/register', methods=['GET', 'POST'])
def reqister():
form = RegisterForm()
if form.validate_on_submit():
result = check_password(form.password.data)
if result != 'OK':
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
email_error='OK', again_password_error='OK', password_error
=result)
if form.password.data != form.password_again.data:
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
email_error='OK', password_error='OK', again_password_error
='Пароли не совпадают')
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
if session_in_db.query(users.User).filter(users.User.email == form.
email.data).first():
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
password_error='OK', again_password_error='OK', email_error
='Такой пользователь уже есть')
if form.gender.data == '1':
gen = 'Мужской'
else:
gen = 'Женский'
user = users.User(name=form.name.data, midname=form.mname.data,
gender=gen, email=form.email.data, surname=form.surname.data,
age=form.age.data, hashed_password=form.password.data)
session_in_db.add(user)
session_in_db.commit()
return redirect('/login')
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form, filename=
'profilem', email_error='OK', password_error='OK',
again_password_error='OK')
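# Caution: despite the hashed_password name, the raw form value is stored as-is
# above; a production app would hash it first, e.g. with
# werkzeug.security.generate_password_hash.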
@app.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
if request.method == 'GET':
filename = get_profile_img()
params = {'title': 'Профиль', 'filename': filename, 'id':
current_user.id, 'name': current_user.name, 'sname':
current_user.surname, 'mname': current_user.midname, 'gender':
current_user.gender, 'age': current_user.age, 'basket_count':
session.get('basket_count', 0)}
return render_template('profile.html', **params)
elif request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOAD_FOLDER'],
f'{current_user.id}.jpg'))
return redirect('/profile')
@app.route('/basket', methods=['GET', 'POST'])
@login_required
def basket():
sessions = db_session.create_session()
filename = get_profile_img()
user = load_user(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
bask = list(map(lambda x: [sessions.query(products.Products).get(x[0]),
x[1]], bask))
session['basket_count'] = len(bask)
return render_template('basket.html', basket_count=session.get(
'basket_count', 0), title='Корзина', filename=filename, bask=bask)
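# Note: user.basket encodes the cart as a space-separated string of
# "product_id-count" pairs (e.g. "3-2 7-1 "); the comprehension above decodes
# it, and the ' '.join(...) calls in the handlers below re-encode it.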
@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])
def delete(product_id, count):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
prod.still_have += count
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
bask = list(filter(lambda x: x[0] != product_id, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
sessions.commit()
return redirect('/basket')
@app.route('/redact_profile', methods=['GET', 'POST'])
@login_required
def redact_profile():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
form = RegisterForm()
if request.method == 'GET':
if user.gender == 'Мужской':
gen = '1'
else:
gen = '2'
form.gender.data = gen
form.name.data = user.name
form.mname.data = user.midname
form.age.data = user.age
form.surname.data = user.surname
elif request.method == 'POST':
if form.gender.data == '1':
gen = 'Мужской'
else:
gen = 'Женский'
user.gender = gen
user.name = form.name.data
user.midname = form.mname.data
user.age = form.age.data
user.surname = form.surname.data
session_in_db.commit()
return redirect('/profile')
filename = get_profile_img()
return render_template('redact_profile.html', form=form, filename=
filename, basket_count=session.get('basket_count', 0), title=
'Редактирование')
class Buy(FlaskForm):
    count = IntegerField('Количество:', validators=[DataRequired(),
NumberRange(1)], default=1)
submit = SubmitField('В корзину')
@app.route('/product/<int:product_id>', methods=['GET', 'POST'])
def product(product_id):
form = Buy()
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if form.validate_on_submit():
if current_user.is_authenticated:
if sessions.query(products.Products).get(product_id
).existence and form.count.data <= prod.still_have:
prod.still_have -= form.count.data
if prod.still_have == 0:
prod.existence = 0
user = sessions.query(users.User).get(current_user.id)
if user.basket:
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for
x in user.basket.strip().split()]
change_product = False
for item in bask:
if item[0] == product_id:
item[1] += form.count.data
change_product = True
if not change_product:
user.basket = (user.basket +
f'{product_id}-{form.count.data} ')
else:
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for
x in bask])
bask += ' '
user.basket = bask
else:
user.basket = f'{product_id}-{form.count.data} '
sessions.commit()
else:
return render_template('product.html', prod=prod, filename=
filename, title=prod.title, form=form, basket_count=
session.get('basket_count', 0), message=
                    'Товара в таком количестве нет в наличии!')
else:
return render_template('product.html', prod=prod, filename=
filename, basket_count=session.get('basket_count', 0),
title=prod.title, form=form, message='Вы не авторизованы')
return redirect('/basket')
return render_template('product.html', prod=prod, filename=filename,
basket_count=session.get('basket_count', 0), title=prod.title, form
=form)
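# The branch above merges a repeat purchase of the same product into its
# existing "id-count" pair instead of appending a duplicate entry, and marks
# the product out of stock once still_have drops to zero.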
@app.route('/redact_prod_plus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_plus(product_id):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if prod.still_have:
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] += 1
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod.still_have -= 1
sessions.commit()
return redirect('/basket')
@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_minus(product_id):
sessions = db_session.create_session()
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] -= 1
bask = list(filter(lambda x: x[1] > 0, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod = sessions.query(products.Products).get(product_id)
prod.still_have += 1
sessions.commit()
return redirect('/basket')
@app.route('/change/<string:pos>')
def change(pos):
last_pos = session.get('sort', 'none')
if last_pos == pos:
session['reverse'] = not session.get('reverse', False)
else:
session['reverse'] = False
session['sort'] = pos
return redirect('/')
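# Clicking the same column header again toggles the sort direction kept in
# session['reverse']; switching to a different column resets it to ascending.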
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Старый пароль', validators=[DataRequired()])
new_password = PasswordField('Новый пароль', validators=[DataRequired()])
again_password = PasswordField('Повторите новый пароль', validators=[
DataRequired()])
submit = SubmitField('Сменить пароль')
@app.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
filename = get_profile_img()
form = ChangePasswordForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
if user.hashed_password != form.old_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='Неверный пароль',
again_password_error='OK', new_password_error='OK',
filename=filename)
result = check_password(form.new_password.data)
if user.hashed_password == form.new_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=
                'Новый пароль не должен совпадать со старым!', filename=filename
)
if result != 'OK':
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=result, filename=filename)
if form.new_password.data != form.again_password.data:
return render_template('change_password.html', basket_count=
session.get('basket_count', 0), title='Регистрация', form=
form, old_password_error='OK', new_password_error='OK',
again_password_error='Пароли не совпадают!', filename=filename)
user.hashed_password = form.new_password.data
session_in_db.commit()
return redirect('/profile')
return render_template('change_password.html', form=form, basket_count=
session.get('basket_count', 0), title='Сменить пароль', filename=
filename, old_password_error='OK', again_password_error='OK',
new_password_error='OK')
def main():
db_session.global_init('db/blogs.sqlite')
api.add_resource(product_resource.ProductListResource, '/api/v2/products')
api.add_resource(product_resource.ProductResource,
'/api/v2/products/<int:product_id>')
app.run()
<mask token>
|
from flask import Flask, render_template, redirect, request, session, flash
from data import db_session
from data import users, products
import os
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, IntegerField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, NumberRange
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
import datetime
from flask_restful import Api
import product_resource
from random import shuffle
app = Flask(__name__)
api = Api(app)
app.debug = True
UPLOAD_FOLDER = f'{os.getcwd()}\\static\\img\\profile_img'
app.config['SECRET_KEY'] = '12345aA'
app.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=1)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
login_manager = LoginManager()
login_manager.init_app(app)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() == 'jpg'
def get_profile_img():
os.chdir('static\\img\\profile_img')
if os.access(f'{current_user.id}.jpg', os.F_OK):
filename = str(current_user.id)
elif current_user.gender[0] == 'М':
filename = 'profilem'
else:
filename = 'profilef'
os.chdir('..\\..\\..')
return filename
def find_products(tag):
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
sessions.commit()
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
ans_products = list()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
title = item.title.lower()
if tag in title or title in tag:
ans_products.append(item)
return ans_products
@app.errorhandler(404)
def not_found(error):
return render_template('404.html', error=error)
@login_manager.user_loader
def load_user(user_id):
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
return session_in_db.query(users.User).get(user_id)
class LoginForm(FlaskForm):
email = EmailField('Почта', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
remember_me = BooleanField('Запомнить меня')
submit = SubmitField('Войти')
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
if request.method == 'POST':
session['tag'] = request.form['search']
return redirect('/')
all_product = find_products(session.get('tag', '').lower())
if session.get('reverse', False):
sim = '▲'
else:
sim = '▼'
simp = simc = simn = simnal = ''
pos = session.get('sort', 'none')
if pos == 'price':
all_product.sort(key=lambda x: x.price, reverse=session.get(
'reverse', False))
simp = sim
elif pos == 'nal':
all_product.sort(key=lambda x: x.existence, reverse=session.get(
'reverse', False))
simnal = sim
elif pos == 'count':
all_product.sort(key=lambda x: x.still_have, reverse=session.get(
'reverse', False))
simc = sim
elif pos == 'name':
simn = sim
all_product.sort(key=lambda x: x.title, reverse=session.get(
'reverse', False))
else:
shuffle(all_product)
return render_template('index.html', basket_count=session.get(
'basket_count', 0), title='CoolStore', tag=session.get('tag', ''),
size=len(all_product), filename=filename, product=all_product, simc
=simc, simn=simn, simp=simp, simnal=simnal)
@app.route('/login', methods=['GET', 'POST'])
def login():
session['tag'] = ''
form = LoginForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).filter(users.User.email ==
form.email.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
bask = list(map(lambda x: [session_in_db.query(products.
Products).get(x[0]), x[1]], bask))
session['basket_count'] = len(bask)
return redirect('/')
return render_template('login_form.html', message=
'Неправильный логин или пароль', form=form)
return render_template('login_form.html', basket_count=session.get(
'basket_count', 0), title='Авторизация', form=form, filename='profilem'
)
@app.route('/logout')
@login_required
def logout():
session['tag'] = ''
logout_user()
return redirect('/')
class RegisterForm(FlaskForm):
email = EmailField('Email', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
password_again = PasswordField('Повторите пароль', validators=[
DataRequired()])
surname = StringField('Фамилия', validators=[DataRequired()])
name = StringField('Имя', validators=[DataRequired()])
mname = StringField('Отчество(при наличии)', validators=[DataRequired()])
gender = SelectField('Пол', validators=[DataRequired()], choices=[('1',
'М'), ('2', 'Ж')])
age = StringField('Возраст', validators=[DataRequired()])
submit = SubmitField('Подтвердить')
class LengthError(Exception):
error = 'Пароль должен состоять не менее чем из 8 символов!'
class SymbolError(Exception):
    error = 'В пароле должен быть хотя бы один специальный символ!'
class LetterError(Exception):
error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'
class DigitError(Exception):
error = 'В пароле должна быть хотя бы одна цифра!'
def bool_ys(password):
ys = [0, 0, 0, 0]
for i in password:
if i.isdigit():
ys[0] = 1
elif i.isalpha():
if i.isupper():
ys[1] = 1
else:
ys[2] = 1
else:
ys[3] = 1
if ys[2] * ys[1] == 0:
raise LetterError
if ys[0] == 0:
raise DigitError
if ys[3] == 0:
raise SymbolError
return 'ok'
def check_password(password):
try:
        if len(password) < 8:
raise LengthError
bool_ys(password)
return 'OK'
except (LengthError, SymbolError, LetterError, DigitError) as ex:
return ex.error
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
result = check_password(form.password.data)
if result != 'OK':
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
email_error='OK', again_password_error='OK', password_error
=result)
if form.password.data != form.password_again.data:
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
email_error='OK', password_error='OK', again_password_error
='Пароли не совпадают')
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
if session_in_db.query(users.User).filter(users.User.email == form.
email.data).first():
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form,
password_error='OK', again_password_error='OK', email_error
='Такой пользователь уже есть')
if form.gender.data == '1':
gen = 'Мужской'
else:
gen = 'Женский'
user = users.User(name=form.name.data, midname=form.mname.data,
gender=gen, email=form.email.data, surname=form.surname.data,
age=form.age.data, hashed_password=form.password.data)
session_in_db.add(user)
session_in_db.commit()
return redirect('/login')
return render_template('reg.html', basket_count=session.get(
'basket_count', 0), title='Регистрация', form=form, filename=
'profilem', email_error='OK', password_error='OK',
again_password_error='OK')
@app.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
if request.method == 'GET':
filename = get_profile_img()
params = {'title': 'Профиль', 'filename': filename, 'id':
current_user.id, 'name': current_user.name, 'sname':
current_user.surname, 'mname': current_user.midname, 'gender':
current_user.gender, 'age': current_user.age, 'basket_count':
session.get('basket_count', 0)}
return render_template('profile.html', **params)
elif request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOAD_FOLDER'],
f'{current_user.id}.jpg'))
return redirect('/profile')
@app.route('/basket', methods=['GET', 'POST'])
@login_required
def basket():
sessions = db_session.create_session()
filename = get_profile_img()
user = load_user(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
bask = list(map(lambda x: [sessions.query(products.Products).get(x[0]),
x[1]], bask))
session['basket_count'] = len(bask)
return render_template('basket.html', basket_count=session.get(
'basket_count', 0), title='Корзина', filename=filename, bask=bask)
@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])
def delete(product_id, count):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
prod.still_have += count
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
bask = list(filter(lambda x: x[0] != product_id, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
sessions.commit()
return redirect('/basket')
@app.route('/redact_profile', methods=['GET', 'POST'])
@login_required
def redact_profile():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
form = RegisterForm()
if request.method == 'GET':
if user.gender == 'Мужской':
gen = '1'
else:
gen = '2'
form.gender.data = gen
form.name.data = user.name
form.mname.data = user.midname
form.age.data = user.age
form.surname.data = user.surname
elif request.method == 'POST':
if form.gender.data == '1':
gen = 'Мужской'
else:
gen = 'Женский'
user.gender = gen
user.name = form.name.data
user.midname = form.mname.data
user.age = form.age.data
user.surname = form.surname.data
session_in_db.commit()
return redirect('/profile')
filename = get_profile_img()
return render_template('redact_profile.html', form=form, filename=
filename, basket_count=session.get('basket_count', 0), title=
'Редактирование')
class Buy(FlaskForm):
    count = IntegerField('Количество:', validators=[DataRequired(),
NumberRange(1)], default=1)
submit = SubmitField('В корзину')
@app.route('/product/<int:product_id>', methods=['GET', 'POST'])
def product(product_id):
form = Buy()
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if form.validate_on_submit():
if current_user.is_authenticated:
if sessions.query(products.Products).get(product_id
).existence and form.count.data <= prod.still_have:
prod.still_have -= form.count.data
if prod.still_have == 0:
prod.existence = 0
user = sessions.query(users.User).get(current_user.id)
if user.basket:
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for
x in user.basket.strip().split()]
change_product = False
for item in bask:
if item[0] == product_id:
item[1] += form.count.data
change_product = True
if not change_product:
user.basket = (user.basket +
f'{product_id}-{form.count.data} ')
else:
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for
x in bask])
bask += ' '
user.basket = bask
else:
user.basket = f'{product_id}-{form.count.data} '
sessions.commit()
else:
return render_template('product.html', prod=prod, filename=
filename, title=prod.title, form=form, basket_count=
session.get('basket_count', 0), message=
                    'Товара в таком количестве нет в наличии!')
else:
return render_template('product.html', prod=prod, filename=
filename, basket_count=session.get('basket_count', 0),
title=prod.title, form=form, message='Вы не авторизованы')
return redirect('/basket')
return render_template('product.html', prod=prod, filename=filename,
basket_count=session.get('basket_count', 0), title=prod.title, form
=form)
@app.route('/redact_prod_plus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_plus(product_id):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if prod.still_have:
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] += 1
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod.still_have -= 1
sessions.commit()
return redirect('/basket')
@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_minus(product_id):
sessions = db_session.create_session()
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.
basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] -= 1
bask = list(filter(lambda x: x[1] > 0, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod = sessions.query(products.Products).get(product_id)
prod.still_have += 1
sessions.commit()
return redirect('/basket')
@app.route('/change/<string:pos>')
def change(pos):
last_pos = session.get('sort', 'none')
if last_pos == pos:
session['reverse'] = not session.get('reverse', False)
else:
session['reverse'] = False
session['sort'] = pos
return redirect('/')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Старый пароль', validators=[DataRequired()])
new_password = PasswordField('Новый пароль', validators=[DataRequired()])
again_password = PasswordField('Повторите новый пароль', validators=[
DataRequired()])
submit = SubmitField('Сменить пароль')
@app.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
filename = get_profile_img()
form = ChangePasswordForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
if user.hashed_password != form.old_password.data:
return render_template('change_password.html', basket_count=
                session.get('basket_count', 0), title='Сменить пароль', form=
form, old_password_error='Неверный пароль',
again_password_error='OK', new_password_error='OK',
filename=filename)
result = check_password(form.new_password.data)
if user.hashed_password == form.new_password.data:
return render_template('change_password.html', basket_count=
                session.get('basket_count', 0), title='Сменить пароль', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=
                'Новый пароль не должен совпадать со старым!', filename=filename
)
if result != 'OK':
return render_template('change_password.html', basket_count=
                session.get('basket_count', 0), title='Сменить пароль', form=
form, old_password_error='OK', again_password_error='OK',
new_password_error=result, filename=filename)
if form.new_password.data != form.again_password.data:
return render_template('change_password.html', basket_count=
                session.get('basket_count', 0), title='Сменить пароль', form=
form, old_password_error='OK', new_password_error='OK',
again_password_error='Пароли не совпадают!', filename=filename)
user.hashed_password = form.new_password.data
session_in_db.commit()
return redirect('/profile')
return render_template('change_password.html', form=form, basket_count=
session.get('basket_count', 0), title='Сменить пароль', filename=
filename, old_password_error='OK', again_password_error='OK',
new_password_error='OK')
def main():
db_session.global_init('db/blogs.sqlite')
api.add_resource(product_resource.ProductListResource, '/api/v2/products')
api.add_resource(product_resource.ProductResource,
'/api/v2/products/<int:product_id>')
app.run()
if __name__ == '__main__':
main()
|
from flask import Flask, render_template, redirect, request, session, flash
from data import db_session
from data import users, products
import os
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField, SelectField, IntegerField
from wtforms.fields.html5 import EmailField
from wtforms.validators import DataRequired, NumberRange
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
import datetime
from flask_restful import Api
import product_resource
from random import shuffle
app = Flask(__name__)
api = Api(app)
app.debug = True
UPLOAD_FOLDER = f'{os.getcwd()}\\static\\img\\profile_img'
app.config['SECRET_KEY'] = '12345aA'
app.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(days=1)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
login_manager = LoginManager()
login_manager.init_app(app)
def allowed_file(filename):
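    # Accept only files with a .jpg extension; this checks the name, not the file contents.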
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() == 'jpg'
def get_profile_img():
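    # Use the user's uploaded avatar if one exists, otherwise fall back to a gender-based default image.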
os.chdir('static\\img\\profile_img')
if os.access(f'{current_user.id}.jpg', os.F_OK):
filename = str(current_user.id)
else:
if current_user.gender[0] == 'М':
filename = 'profilem'
else:
filename = 'profilef'
os.chdir('..\\..\\..')
return filename
def find_products(tag):
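    # Re-sync each product's existence flag with its stock counter, then return products whose title matches the search tag.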
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
sessions.commit()
sessions = db_session.create_session()
all_products = sessions.query(products.Products).all()
ans_products = list()
for item in all_products:
if item.existence and item.still_have == 0:
item.existence = 0
elif not item.existence and item.still_have:
item.existence = 1
title = item.title.lower()
if tag in title or title in tag:
ans_products.append(item)
return ans_products
@app.errorhandler(404)
def not_found(error):
return render_template('404.html', error=error)
@login_manager.user_loader
def load_user(user_id):
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
return session_in_db.query(users.User).get(user_id)
class LoginForm(FlaskForm):
email = EmailField('Почта', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
remember_me = BooleanField('Запомнить меня')
submit = SubmitField('Войти')
@app.route('/', methods=['GET', 'POST'])
def index():
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
if request.method == 'POST':
session['tag'] = request.form['search']
return redirect('/')
all_product = find_products(session.get('tag', '').lower())
if session.get('reverse', False):
sim = '▲'
else:
sim = '▼'
simp = simc = simn = simnal = ''
pos = session.get('sort', 'none')
if pos == 'price':
all_product.sort(key=lambda x: x.price, reverse=session.get('reverse', False))
simp = sim
elif pos == 'nal':
all_product.sort(key=lambda x: x.existence, reverse=session.get('reverse', False))
simnal = sim
elif pos == 'count':
all_product.sort(key=lambda x: x.still_have, reverse=session.get('reverse', False))
simc = sim
elif pos == 'name':
simn = sim
all_product.sort(key=lambda x: x.title, reverse=session.get('reverse', False))
else:
shuffle(all_product)
return render_template('index.html', basket_count=session.get('basket_count', 0),
title="CoolStore", tag=session.get('tag', ''), size=len(all_product),
filename=filename, product=all_product, simc=simc, simn=simn, simp=simp,
simnal=simnal)
@app.route('/login', methods=['GET', 'POST'])
def login():
session['tag'] = ''
form = LoginForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).filter(users.User.email == form.email.data).first()
if user and user.check_password(form.password.data):
login_user(user, remember=form.remember_me.data)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
bask = list(
map(lambda x: [session_in_db.query(products.Products).get(x[0]), x[1]], bask))
session['basket_count'] = len(bask)
return redirect("/")
return render_template('login_form.html',
message="Неправильный логин или пароль",
form=form)
return render_template('login_form.html', basket_count=session.get('basket_count', 0),
title='Авторизация', form=form, filename="profilem")
@app.route('/logout')
@login_required
def logout():
session['tag'] = ''
logout_user()
return redirect("/")
class RegisterForm(FlaskForm):
email = EmailField('Email', validators=[DataRequired()])
password = PasswordField('Пароль', validators=[DataRequired()])
password_again = PasswordField('Повторите пароль', validators=[DataRequired()])
surname = StringField('Фамилия', validators=[DataRequired()])
name = StringField('Имя', validators=[DataRequired()])
mname = StringField('Отчество(при наличии)', validators=[DataRequired()])
gender = SelectField("Пол", validators=[DataRequired()], choices=[('1', 'М'), ('2', "Ж")])
age = StringField('Возраст', validators=[DataRequired()])
submit = SubmitField('Подтвердить')
class LengthError(Exception):
error = 'Пароль должен состоять не менее чем из 8 символов!'
class SymbolError(Exception):
    error = 'В пароле должен быть хотя бы один специальный символ!'
class LetterError(Exception):
error = 'В пароле должна быть хотя бы одна большая и маленькая буква!'
class DigitError(Exception):
error = 'В пароле должна быть хотя бы одна цифра!'
def bool_ys(password):
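    # Track four character classes (digit, uppercase, lowercase, other symbol) and raise the matching error for any missing one.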
ys = [0, 0, 0, 0]
for i in password:
if i.isdigit():
ys[0] = 1
elif i.isalpha():
if i.isupper():
ys[1] = 1
else:
ys[2] = 1
else:
ys[3] = 1
if ys[2] * ys[1] == 0:
raise LetterError
if ys[0] == 0:
raise DigitError
if ys[3] == 0:
raise SymbolError
return 'ok'
def check_password(password):
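    # Return 'OK' when the password passes every rule, otherwise the message of the first rule it violates.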
try:
        if len(password) < 8:
raise LengthError
bool_ys(password)
return 'OK'
except (LengthError, SymbolError, LetterError, DigitError) as ex:
return ex.error
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
result = check_password(form.password.data)
if result != 'OK':
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация',
form=form, email_error="OK", again_password_error="OK",
password_error=result)
if form.password.data != form.password_again.data:
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация',
form=form, email_error="OK", password_error="OK",
again_password_error="Пароли не совпадают")
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
if session_in_db.query(users.User).filter(users.User.email == form.email.data).first():
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация',
form=form, password_error="OK", again_password_error="OK",
email_error="Такой пользователь уже есть")
if form.gender.data == '1':
gen = "Мужской"
else:
gen = "Женский"
user = users.User(
name=form.name.data,
midname=form.mname.data,
gender=gen,
email=form.email.data,
surname=form.surname.data,
age=form.age.data,
hashed_password=form.password.data
)
session_in_db.add(user)
session_in_db.commit()
return redirect('/login')
return render_template('reg.html', basket_count=session.get('basket_count', 0),
title='Регистрация', form=form, filename="profilem",
email_error="OK", password_error="OK", again_password_error="OK")
@app.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
if request.method == 'GET':
filename = get_profile_img()
params = {
'title': 'Профиль',
'filename': filename,
'id': current_user.id,
'name': current_user.name,
'sname': current_user.surname,
'mname': current_user.midname,
'gender': current_user.gender,
'age': current_user.age,
'basket_count': session.get('basket_count', 0)
}
return render_template('profile.html', **params)
elif request.method == 'POST':
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
file.save(os.path.join(app.config['UPLOAD_FOLDER'], f'{current_user.id}.jpg'))
return redirect('/profile')
@app.route('/basket', methods=['GET', 'POST'])
@login_required
def basket():
sessions = db_session.create_session()
filename = get_profile_img()
user = load_user(current_user.id)
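    # The basket is persisted on the user as a space-separated string of "product_id-count" pairs.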
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.basket.strip().split()]
bask = list(map(lambda x: [sessions.query(products.Products).get(x[0]), x[1]], bask))
session['basket_count'] = len(bask)
return render_template('basket.html', basket_count=session.get('basket_count', 0),
title='Корзина', filename=filename, bask=bask)
@app.route('/delete/<int:product_id>/<int:count>', methods=['GET', 'POST'])
def delete(product_id, count):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
prod.still_have += count
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in user.basket.strip().split()]
bask = list(filter(lambda x: x[0] != product_id, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
sessions.commit()
return redirect('/basket')
@app.route('/redact_profile', methods=['GET', 'POST'])
@login_required
def redact_profile():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
form = RegisterForm()
if request.method == 'GET':
if user.gender == 'Мужской':
gen = '1'
else:
gen = '2'
form.gender.data = gen
form.name.data = user.name
form.mname.data = user.midname
form.age.data = user.age
form.surname.data = user.surname
elif request.method == 'POST':
if form.gender.data == '1':
gen = "Мужской"
else:
gen = "Женский"
user.gender = gen
user.name = form.name.data
user.midname = form.mname.data
user.age = form.age.data
user.surname = form.surname.data
session_in_db.commit()
return redirect('/profile')
filename = get_profile_img()
return render_template('redact_profile.html', form=form, filename=filename,
basket_count=session.get('basket_count', 0), title='Редактирование')
class Buy(FlaskForm):
    count = IntegerField('Количество:', validators=[DataRequired(), NumberRange(1)],
default=1)
submit = SubmitField('В корзину')
@app.route('/product/<int:product_id>', methods=['GET', 'POST'])
def product(product_id):
form = Buy()
if current_user.is_authenticated:
filename = get_profile_img()
else:
filename = 'profilem'
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if form.validate_on_submit():
if current_user.is_authenticated:
if sessions.query(products.Products).get(product_id).existence and \
form.count.data <= prod.still_have:
prod.still_have -= form.count.data
if prod.still_have == 0:
prod.existence = 0
user = sessions.query(users.User).get(current_user.id)
if user.basket:
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
change_product = False
for item in bask:
if item[0] == product_id:
item[1] += form.count.data
change_product = True
if not change_product:
user.basket = user.basket + f'{product_id}-{form.count.data} '
else:
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
else:
user.basket = f'{product_id}-{form.count.data} '
sessions.commit()
else:
return render_template('product.html', prod=prod, filename=filename,
title=prod.title, form=form,
basket_count=session.get('basket_count', 0),
                                       message='Товара в таком количестве нет в наличии!')
else:
return render_template('product.html', prod=prod, filename=filename,
basket_count=session.get('basket_count', 0), title=prod.title,
form=form, message='Вы не авторизованы')
return redirect('/basket')
return render_template('product.html', prod=prod, filename=filename,
basket_count=session.get('basket_count', 0), title=prod.title,
form=form)
@app.route('/redact_prod_plus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_plus(product_id):
sessions = db_session.create_session()
prod = sessions.query(products.Products).get(product_id)
if prod.still_have:
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] += 1
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod.still_have -= 1
sessions.commit()
return redirect('/basket')
@app.route('/redact_prod_minus/<int:product_id>', methods=['GET', 'POST'])
def redact_prod_minus(product_id):
sessions = db_session.create_session()
user = sessions.query(users.User).get(current_user.id)
bask = [[int(x.split('-')[0]), int(x.split('-')[1])] for x in
user.basket.strip().split()]
for item in bask:
if item[0] == product_id:
item[1] -= 1
bask = list(filter(lambda x: x[1] > 0, bask))
bask = ' '.join(['-'.join([str(x[0]), str(x[1])]) for x in bask])
bask += ' '
user.basket = bask
prod = sessions.query(products.Products).get(product_id)
prod.still_have += 1
sessions.commit()
return redirect('/basket')
@app.route('/change/<string:pos>')
def change(pos):
last_pos = session.get('sort', 'none')
if last_pos == pos:
session['reverse'] = not session.get('reverse', False)
else:
session['reverse'] = False
session['sort'] = pos
return redirect('/')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Старый пароль', validators=[DataRequired()])
new_password = PasswordField('Новый пароль', validators=[DataRequired()])
again_password = PasswordField('Повторите новый пароль', validators=[DataRequired()])
submit = SubmitField('Сменить пароль')
@app.route('/change_password', methods=['GET', "POST"])
@login_required
def change_password():
filename = get_profile_img()
form = ChangePasswordForm()
if form.validate_on_submit():
db_session.global_init('db/blogs.sqlite')
session_in_db = db_session.create_session()
user = session_in_db.query(users.User).get(current_user.id)
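        # Note: the old password is compared as plain text here, despite the hashed_password field name.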
if user.hashed_password != form.old_password.data:
return render_template('change_password.html',
                basket_count=session.get('basket_count', 0), title='Сменить пароль',
form=form, old_password_error="Неверный пароль",
again_password_error="OK", new_password_error="OK",
filename=filename)
result = check_password(form.new_password.data)
if user.hashed_password == form.new_password.data:
return render_template('change_password.html',
                basket_count=session.get('basket_count', 0), title='Сменить пароль',
form=form, old_password_error="OK", again_password_error="OK",
new_password_error="Новый пароль не должен совпадть со старым!",
filename=filename)
if result != 'OK':
return render_template('change_password.html',
                basket_count=session.get('basket_count', 0), title='Сменить пароль',
form=form, old_password_error="OK", again_password_error="OK",
new_password_error=result, filename=filename)
if form.new_password.data != form.again_password.data:
return render_template('change_password.html',
                basket_count=session.get('basket_count', 0), title='Сменить пароль',
form=form, old_password_error="OK", new_password_error="OK",
again_password_error="Пароли не совпадают!", filename=filename)
user.hashed_password = form.new_password.data
session_in_db.commit()
return redirect('/profile')
return render_template('change_password.html', form=form,
basket_count=session.get('basket_count', 0), title="Сменить пароль",
filename=filename, old_password_error="OK", again_password_error="OK",
new_password_error="OK")
def main():
db_session.global_init("db/blogs.sqlite")
api.add_resource(product_resource.ProductListResource, '/api/v2/products')
api.add_resource(product_resource.ProductResource, '/api/v2/products/<int:product_id>')
app.run()
if __name__ == '__main__':
main()
|
[
30,
35,
37,
40,
41
] |
2,115 |
ce12ede15f4ca4a085e38e455515d8a028da8fd2
|
<mask token>
class StageOneCustomize:
<mask token>
def __init__(self, process, customize, metaConditions):
self.process = process
self.customize = customize
self.metaConditions = metaConditions
self.modifyForttH = True
self.tagList = [['LOGICERROR', 0], ['NOTAG', 0], [
'RECO_0J_PTH_0_10_Tag0', 0], ['RECO_0J_PTH_0_10_Tag1', 0], [
'RECO_0J_PTH_0_10_Tag2', 0], ['RECO_0J_PTH_GT10_Tag0', 0], [
'RECO_0J_PTH_GT10_Tag1', 0], ['RECO_0J_PTH_GT10_Tag2', 0], [
'RECO_1J_PTH_0_60_Tag0', 0], ['RECO_1J_PTH_0_60_Tag1', 0], [
'RECO_1J_PTH_0_60_Tag2', 0], ['RECO_1J_PTH_60_120_Tag0', 0], [
'RECO_1J_PTH_60_120_Tag1', 0], ['RECO_1J_PTH_60_120_Tag2', 0],
['RECO_1J_PTH_120_200_Tag0', 0], ['RECO_1J_PTH_120_200_Tag1', 0
], ['RECO_1J_PTH_120_200_Tag2', 0], ['RECO_GE2J_PTH_0_60_Tag0',
0], ['RECO_GE2J_PTH_0_60_Tag1', 0], ['RECO_GE2J_PTH_0_60_Tag2',
0], ['RECO_GE2J_PTH_60_120_Tag0', 0], [
'RECO_GE2J_PTH_60_120_Tag1', 0], ['RECO_GE2J_PTH_60_120_Tag2',
0], ['RECO_GE2J_PTH_120_200_Tag0', 0], [
'RECO_GE2J_PTH_120_200_Tag1', 0], ['RECO_GE2J_PTH_120_200_Tag2',
0], ['RECO_PTH_200_300_Tag0', 0], ['RECO_PTH_200_300_Tag1', 0],
['RECO_PTH_300_450_Tag0', 0], ['RECO_PTH_300_450_Tag1', 0], [
'RECO_PTH_450_650_Tag0', 0], ['RECO_PTH_GT650_Tag0', 0], [
'RECO_VBFTOPO_VHHAD_Tag0', 0], ['RECO_VBFTOPO_VHHAD_Tag1', 0],
['RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag1', 0], ['RECO_VBFTOPO_BSM_Tag0',
0], ['RECO_VBFTOPO_BSM_Tag1', 0], ['RECO_VBFLIKEGGH_Tag0', 0],
['RECO_VBFLIKEGGH_Tag1', 0], ['RECO_TTH_HAD_PTH_0_60_Tag0', 0],
['RECO_TTH_HAD_PTH_0_60_Tag1', 0], [
'RECO_TTH_HAD_PTH_0_60_Tag2', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag0', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag1', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag0', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag1', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag3', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag0', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag1', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag2', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag0', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag1', 0], ['RECO_WH_LEP_PTV_0_75_Tag0',
0], ['RECO_WH_LEP_PTV_0_75_Tag1', 0], [
'RECO_WH_LEP_PTV_75_150_Tag0', 0], [
'RECO_WH_LEP_PTV_75_150_Tag1', 0], [
'RECO_WH_LEP_PTV_GT150_Tag0', 0], ['RECO_ZH_LEP_Tag0', 0], [
'RECO_ZH_LEP_Tag1', 0], ['RECO_VH_MET_Tag0', 0], [
'RECO_VH_MET_Tag1', 0], ['RECO_VH_MET_Tag2', 0], [
'RECO_TTH_LEP_PTH_0_60_Tag0', 0], ['RECO_TTH_LEP_PTH_0_60_Tag1',
0], ['RECO_TTH_LEP_PTH_0_60_Tag2', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag0', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag1', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag2', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag0', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag1', 0], [
'RECO_TTH_LEP_PTH_200_300_Tag0', 0], [
'RECO_TTH_LEP_PTH_GT300_Tag0', 0], ['RECO_THQ_LEP', 0]]
if self.customize.processId == 'Data':
self.tagList.pop(1)
self.stageOneVariable = [
'stage1p2bin[57,-8.5,48.5] := tagTruth().HTXSstage1p2orderedBin']
self.tagPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.InputTag(
'flashggTHQLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggZHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHHadronicTag')), cms.PSet(TagName=cms.InputTag(
'flashggVHMetTag')), cms.PSet(TagName=cms.InputTag(
'flashggStageOneCombinedTag')))
self.customizeTagSequence()
<mask token>
def systematicVariables(self):
systematicVariables = []
systematicVariables += self.stageOneVariable
systematicVariables += ['CMS_hgg_mass[160,100,180]:=diPhoton().mass']
return systematicVariables
def noTagVariables(self):
noTagVariables = []
noTagVariables += self.stageOneVariable
for direction in ['Up', 'Down']:
noTagVariables.append(
'THU_ggH_Mu%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mu%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Res%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Res%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig01%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig01%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig12%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig12%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF2j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF2j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF3j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF3j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT60%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT60%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT120%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT120%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_qmtop%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_qmtop%s01sigma")'
% (direction, direction))
return noTagVariables
def customizeTagSequence(self):
self.process.load('flashgg.Taggers.flashggStageOneCombinedTag_cfi')
self.process.flashggTagSequence.remove(self.process.
flashggVBFDiPhoDiJetMVA)
self.process.flashggTagSequence.remove(self.process.
flashggTTHDiLeptonTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHLeptonicTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHHadronicTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHLeptonicLooseTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHHadronicTag)
self.process.flashggTagSequence.remove(self.process.flashggVBFTag)
self.process.flashggTagSequence.replace(self.process.
flashggUntagged, self.process.flashggStageOneCombinedTag)
self.process.flashggStageOneCombinedTag.rawDiphoBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDiphoBounds'])
self.process.flashggStageOneCombinedTag.rawDijetBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDijetBounds'])
self.process.flashggStageOneCombinedTag.rawGghBounds = cms.vdouble(self
.metaConditions['stageOneCombinedTag']['rawGghBounds'])
self.process.flashggStageOneCombinedTag.rawVhHadBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawVhHadBounds'])
self.metaConditions['L1Prefiring']['applyToCentral'] = True
self.process.flashggTagSorter.TagPriorityRanges = (self.
tagPriorityRanges)
self.process.flashggTagSorter.isGluonFusion = cms.bool(bool(self.
customize.processId.count('ggh')))
self.process.flashggTagSorter.applyNNLOPSweight = cms.bool(self.
customize.applyNNLOPSweight)
self.process.flashggSystTagMerger = cms.EDProducer('TagMerger', src
=cms.VInputTag('flashggTagSorter'))
def modifyWorkflowForttH(self, systlabels, phosystlabels, metsystlabels,
jetsystlabels):
for tag in ['flashggTTHLeptonicTag', 'flashggTTHHadronicTag']:
getattr(self.process, tag).DiPhotonSuffixes = cms.vstring(
phosystlabels)
getattr(self.process, tag).JetsSuffixes = cms.vstring(jetsystlabels
)
getattr(self.process, tag).MetSuffixes = cms.vstring(metsystlabels)
getattr(self.process, tag).ModifySystematicsWorkflow = cms.bool(
True)
getattr(self.process, tag).UseLargeMVAs = cms.bool(True)
self.process.p.remove(self.process.flashggTagSorter)
self.process.p.replace(self.process.flashggSystTagMerger, cms.
Sequence(self.process.flashggTTHLeptonicTag + self.process.
flashggTTHHadronicTag) * self.process.flashggTagSorter * self.
process.flashggSystTagMerger)
for systlabel in systlabels:
if systlabel == '':
continue
self.process.p.remove(getattr(self.process, 'flashggTagSorter' +
systlabel))
self.process.p.replace(self.process.flashggSystTagMerger,
getattr(self.process, 'flashggTagSorter' + systlabel) *
self.process.flashggSystTagMerger)
modifiedPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.
InputTag('flashggTHQLeptonicTag' + systlabel)), cms.PSet(
TagName=cms.InputTag('flashggTTHLeptonicTag', systlabel)),
cms.PSet(TagName=cms.InputTag('flashggZHLeptonicTag' +
systlabel)), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag' + systlabel)), cms.PSet(TagName=cms.
InputTag('flashggTTHHadronicTag', systlabel)), cms.PSet(
TagName=cms.InputTag('flashggVHMetTag' + systlabel)), cms.
PSet(TagName=cms.InputTag('flashggStageOneCombinedTag' +
systlabel)))
setattr(getattr(self.process, 'flashggTagSorter' + systlabel),
'TagPriorityRanges', modifiedPriorityRanges)
|
<mask token>
class StageOneCustomize:
<mask token>
def __init__(self, process, customize, metaConditions):
self.process = process
self.customize = customize
self.metaConditions = metaConditions
self.modifyForttH = True
self.tagList = [['LOGICERROR', 0], ['NOTAG', 0], [
'RECO_0J_PTH_0_10_Tag0', 0], ['RECO_0J_PTH_0_10_Tag1', 0], [
'RECO_0J_PTH_0_10_Tag2', 0], ['RECO_0J_PTH_GT10_Tag0', 0], [
'RECO_0J_PTH_GT10_Tag1', 0], ['RECO_0J_PTH_GT10_Tag2', 0], [
'RECO_1J_PTH_0_60_Tag0', 0], ['RECO_1J_PTH_0_60_Tag1', 0], [
'RECO_1J_PTH_0_60_Tag2', 0], ['RECO_1J_PTH_60_120_Tag0', 0], [
'RECO_1J_PTH_60_120_Tag1', 0], ['RECO_1J_PTH_60_120_Tag2', 0],
['RECO_1J_PTH_120_200_Tag0', 0], ['RECO_1J_PTH_120_200_Tag1', 0
], ['RECO_1J_PTH_120_200_Tag2', 0], ['RECO_GE2J_PTH_0_60_Tag0',
0], ['RECO_GE2J_PTH_0_60_Tag1', 0], ['RECO_GE2J_PTH_0_60_Tag2',
0], ['RECO_GE2J_PTH_60_120_Tag0', 0], [
'RECO_GE2J_PTH_60_120_Tag1', 0], ['RECO_GE2J_PTH_60_120_Tag2',
0], ['RECO_GE2J_PTH_120_200_Tag0', 0], [
'RECO_GE2J_PTH_120_200_Tag1', 0], ['RECO_GE2J_PTH_120_200_Tag2',
0], ['RECO_PTH_200_300_Tag0', 0], ['RECO_PTH_200_300_Tag1', 0],
['RECO_PTH_300_450_Tag0', 0], ['RECO_PTH_300_450_Tag1', 0], [
'RECO_PTH_450_650_Tag0', 0], ['RECO_PTH_GT650_Tag0', 0], [
'RECO_VBFTOPO_VHHAD_Tag0', 0], ['RECO_VBFTOPO_VHHAD_Tag1', 0],
['RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag1', 0], ['RECO_VBFTOPO_BSM_Tag0',
0], ['RECO_VBFTOPO_BSM_Tag1', 0], ['RECO_VBFLIKEGGH_Tag0', 0],
['RECO_VBFLIKEGGH_Tag1', 0], ['RECO_TTH_HAD_PTH_0_60_Tag0', 0],
['RECO_TTH_HAD_PTH_0_60_Tag1', 0], [
'RECO_TTH_HAD_PTH_0_60_Tag2', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag0', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag1', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag0', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag1', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag3', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag0', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag1', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag2', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag0', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag1', 0], ['RECO_WH_LEP_PTV_0_75_Tag0',
0], ['RECO_WH_LEP_PTV_0_75_Tag1', 0], [
'RECO_WH_LEP_PTV_75_150_Tag0', 0], [
'RECO_WH_LEP_PTV_75_150_Tag1', 0], [
'RECO_WH_LEP_PTV_GT150_Tag0', 0], ['RECO_ZH_LEP_Tag0', 0], [
'RECO_ZH_LEP_Tag1', 0], ['RECO_VH_MET_Tag0', 0], [
'RECO_VH_MET_Tag1', 0], ['RECO_VH_MET_Tag2', 0], [
'RECO_TTH_LEP_PTH_0_60_Tag0', 0], ['RECO_TTH_LEP_PTH_0_60_Tag1',
0], ['RECO_TTH_LEP_PTH_0_60_Tag2', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag0', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag1', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag2', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag0', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag1', 0], [
'RECO_TTH_LEP_PTH_200_300_Tag0', 0], [
'RECO_TTH_LEP_PTH_GT300_Tag0', 0], ['RECO_THQ_LEP', 0]]
if self.customize.processId == 'Data':
self.tagList.pop(1)
self.stageOneVariable = [
'stage1p2bin[57,-8.5,48.5] := tagTruth().HTXSstage1p2orderedBin']
self.tagPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.InputTag(
'flashggTHQLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggZHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHHadronicTag')), cms.PSet(TagName=cms.InputTag(
'flashggVHMetTag')), cms.PSet(TagName=cms.InputTag(
'flashggStageOneCombinedTag')))
self.customizeTagSequence()
def variablesToDump(self):
ws_variables = []
ws_variables += self.stageOneVariable
ws_variables += ['CMS_hgg_mass[160,100,180]:=diPhoton().mass',
'dZ[40,-20.,20.]:=(tagTruth().genPV().z-diPhoton().vtx().z)',
'NNLOPSweight[1,-999999.,999999.] := tagTruth().weight("NNLOPSweight")'
,
'btagReshapeNorm_TTH_LEP[1,-999999.,999999.] := weight("btagReshapeNorm_TTH_LEP")'
,
'btagReshapeNorm_TTH_HAD[1,-999999.,999999.] := weight("btagReshapeNorm_TTH_HAD")'
,
'btagReshapeNorm_THQ_LEP[1,-999999.,999999.] := weight("btagReshapeNorm_THQ_LEP")'
, 'centralObjectWeight[1,-999999.,999999.] := centralWeight']
ntup_variables = ws_variables
if self.customize.dumpWorkspace:
return ws_variables
else:
return ntup_variables
def systematicVariables(self):
systematicVariables = []
systematicVariables += self.stageOneVariable
systematicVariables += ['CMS_hgg_mass[160,100,180]:=diPhoton().mass']
return systematicVariables
def noTagVariables(self):
noTagVariables = []
noTagVariables += self.stageOneVariable
for direction in ['Up', 'Down']:
noTagVariables.append(
'THU_ggH_Mu%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mu%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Res%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Res%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig01%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig01%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig12%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig12%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF2j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF2j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF3j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF3j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT60%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT60%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT120%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT120%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_qmtop%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_qmtop%s01sigma")'
% (direction, direction))
return noTagVariables
def customizeTagSequence(self):
self.process.load('flashgg.Taggers.flashggStageOneCombinedTag_cfi')
self.process.flashggTagSequence.remove(self.process.
flashggVBFDiPhoDiJetMVA)
self.process.flashggTagSequence.remove(self.process.
flashggTTHDiLeptonTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHLeptonicTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHHadronicTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHLeptonicLooseTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHHadronicTag)
self.process.flashggTagSequence.remove(self.process.flashggVBFTag)
self.process.flashggTagSequence.replace(self.process.
flashggUntagged, self.process.flashggStageOneCombinedTag)
self.process.flashggStageOneCombinedTag.rawDiphoBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDiphoBounds'])
self.process.flashggStageOneCombinedTag.rawDijetBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDijetBounds'])
self.process.flashggStageOneCombinedTag.rawGghBounds = cms.vdouble(self
.metaConditions['stageOneCombinedTag']['rawGghBounds'])
self.process.flashggStageOneCombinedTag.rawVhHadBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawVhHadBounds'])
self.metaConditions['L1Prefiring']['applyToCentral'] = True
self.process.flashggTagSorter.TagPriorityRanges = (self.
tagPriorityRanges)
self.process.flashggTagSorter.isGluonFusion = cms.bool(bool(self.
customize.processId.count('ggh')))
self.process.flashggTagSorter.applyNNLOPSweight = cms.bool(self.
customize.applyNNLOPSweight)
self.process.flashggSystTagMerger = cms.EDProducer('TagMerger', src
=cms.VInputTag('flashggTagSorter'))
def modifyWorkflowForttH(self, systlabels, phosystlabels, metsystlabels,
jetsystlabels):
for tag in ['flashggTTHLeptonicTag', 'flashggTTHHadronicTag']:
getattr(self.process, tag).DiPhotonSuffixes = cms.vstring(
phosystlabels)
getattr(self.process, tag).JetsSuffixes = cms.vstring(jetsystlabels
)
getattr(self.process, tag).MetSuffixes = cms.vstring(metsystlabels)
getattr(self.process, tag).ModifySystematicsWorkflow = cms.bool(
True)
getattr(self.process, tag).UseLargeMVAs = cms.bool(True)
self.process.p.remove(self.process.flashggTagSorter)
self.process.p.replace(self.process.flashggSystTagMerger, cms.
Sequence(self.process.flashggTTHLeptonicTag + self.process.
flashggTTHHadronicTag) * self.process.flashggTagSorter * self.
process.flashggSystTagMerger)
for systlabel in systlabels:
if systlabel == '':
continue
self.process.p.remove(getattr(self.process, 'flashggTagSorter' +
systlabel))
self.process.p.replace(self.process.flashggSystTagMerger,
getattr(self.process, 'flashggTagSorter' + systlabel) *
self.process.flashggSystTagMerger)
modifiedPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.
InputTag('flashggTHQLeptonicTag' + systlabel)), cms.PSet(
TagName=cms.InputTag('flashggTTHLeptonicTag', systlabel)),
cms.PSet(TagName=cms.InputTag('flashggZHLeptonicTag' +
systlabel)), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag' + systlabel)), cms.PSet(TagName=cms.
InputTag('flashggTTHHadronicTag', systlabel)), cms.PSet(
TagName=cms.InputTag('flashggVHMetTag' + systlabel)), cms.
PSet(TagName=cms.InputTag('flashggStageOneCombinedTag' +
systlabel)))
setattr(getattr(self.process, 'flashggTagSorter' + systlabel),
'TagPriorityRanges', modifiedPriorityRanges)
|
<mask token>
class StageOneCustomize:
"""
    Customization class for STXS stage 1 analysis
"""
def __init__(self, process, customize, metaConditions):
self.process = process
self.customize = customize
self.metaConditions = metaConditions
self.modifyForttH = True
self.tagList = [['LOGICERROR', 0], ['NOTAG', 0], [
'RECO_0J_PTH_0_10_Tag0', 0], ['RECO_0J_PTH_0_10_Tag1', 0], [
'RECO_0J_PTH_0_10_Tag2', 0], ['RECO_0J_PTH_GT10_Tag0', 0], [
'RECO_0J_PTH_GT10_Tag1', 0], ['RECO_0J_PTH_GT10_Tag2', 0], [
'RECO_1J_PTH_0_60_Tag0', 0], ['RECO_1J_PTH_0_60_Tag1', 0], [
'RECO_1J_PTH_0_60_Tag2', 0], ['RECO_1J_PTH_60_120_Tag0', 0], [
'RECO_1J_PTH_60_120_Tag1', 0], ['RECO_1J_PTH_60_120_Tag2', 0],
['RECO_1J_PTH_120_200_Tag0', 0], ['RECO_1J_PTH_120_200_Tag1', 0
], ['RECO_1J_PTH_120_200_Tag2', 0], ['RECO_GE2J_PTH_0_60_Tag0',
0], ['RECO_GE2J_PTH_0_60_Tag1', 0], ['RECO_GE2J_PTH_0_60_Tag2',
0], ['RECO_GE2J_PTH_60_120_Tag0', 0], [
'RECO_GE2J_PTH_60_120_Tag1', 0], ['RECO_GE2J_PTH_60_120_Tag2',
0], ['RECO_GE2J_PTH_120_200_Tag0', 0], [
'RECO_GE2J_PTH_120_200_Tag1', 0], ['RECO_GE2J_PTH_120_200_Tag2',
0], ['RECO_PTH_200_300_Tag0', 0], ['RECO_PTH_200_300_Tag1', 0],
['RECO_PTH_300_450_Tag0', 0], ['RECO_PTH_300_450_Tag1', 0], [
'RECO_PTH_450_650_Tag0', 0], ['RECO_PTH_GT650_Tag0', 0], [
'RECO_VBFTOPO_VHHAD_Tag0', 0], ['RECO_VBFTOPO_VHHAD_Tag1', 0],
['RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag1', 0], ['RECO_VBFTOPO_BSM_Tag0',
0], ['RECO_VBFTOPO_BSM_Tag1', 0], ['RECO_VBFLIKEGGH_Tag0', 0],
['RECO_VBFLIKEGGH_Tag1', 0], ['RECO_TTH_HAD_PTH_0_60_Tag0', 0],
['RECO_TTH_HAD_PTH_0_60_Tag1', 0], [
'RECO_TTH_HAD_PTH_0_60_Tag2', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag0', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag1', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag0', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag1', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag3', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag0', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag1', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag2', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag0', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag1', 0], ['RECO_WH_LEP_PTV_0_75_Tag0',
0], ['RECO_WH_LEP_PTV_0_75_Tag1', 0], [
'RECO_WH_LEP_PTV_75_150_Tag0', 0], [
'RECO_WH_LEP_PTV_75_150_Tag1', 0], [
'RECO_WH_LEP_PTV_GT150_Tag0', 0], ['RECO_ZH_LEP_Tag0', 0], [
'RECO_ZH_LEP_Tag1', 0], ['RECO_VH_MET_Tag0', 0], [
'RECO_VH_MET_Tag1', 0], ['RECO_VH_MET_Tag2', 0], [
'RECO_TTH_LEP_PTH_0_60_Tag0', 0], ['RECO_TTH_LEP_PTH_0_60_Tag1',
0], ['RECO_TTH_LEP_PTH_0_60_Tag2', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag0', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag1', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag2', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag0', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag1', 0], [
'RECO_TTH_LEP_PTH_200_300_Tag0', 0], [
'RECO_TTH_LEP_PTH_GT300_Tag0', 0], ['RECO_THQ_LEP', 0]]
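        # Drop the NOTAG category for data, where no generator-level tag truth exists.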
if self.customize.processId == 'Data':
self.tagList.pop(1)
self.stageOneVariable = [
'stage1p2bin[57,-8.5,48.5] := tagTruth().HTXSstage1p2orderedBin']
self.tagPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.InputTag(
'flashggTHQLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggZHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHHadronicTag')), cms.PSet(TagName=cms.InputTag(
'flashggVHMetTag')), cms.PSet(TagName=cms.InputTag(
'flashggStageOneCombinedTag')))
self.customizeTagSequence()
def variablesToDump(self):
ws_variables = []
ws_variables += self.stageOneVariable
ws_variables += ['CMS_hgg_mass[160,100,180]:=diPhoton().mass',
'dZ[40,-20.,20.]:=(tagTruth().genPV().z-diPhoton().vtx().z)',
'NNLOPSweight[1,-999999.,999999.] := tagTruth().weight("NNLOPSweight")'
,
'btagReshapeNorm_TTH_LEP[1,-999999.,999999.] := weight("btagReshapeNorm_TTH_LEP")'
,
'btagReshapeNorm_TTH_HAD[1,-999999.,999999.] := weight("btagReshapeNorm_TTH_HAD")'
,
'btagReshapeNorm_THQ_LEP[1,-999999.,999999.] := weight("btagReshapeNorm_THQ_LEP")'
, 'centralObjectWeight[1,-999999.,999999.] := centralWeight']
ntup_variables = ws_variables
if self.customize.dumpWorkspace:
return ws_variables
else:
return ntup_variables
def systematicVariables(self):
systematicVariables = []
systematicVariables += self.stageOneVariable
systematicVariables += ['CMS_hgg_mass[160,100,180]:=diPhoton().mass']
return systematicVariables
def noTagVariables(self):
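        # Build the THU_ggH theory-uncertainty weight branches for the Up and Down variations.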
noTagVariables = []
noTagVariables += self.stageOneVariable
for direction in ['Up', 'Down']:
noTagVariables.append(
'THU_ggH_Mu%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mu%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Res%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Res%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig01%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig01%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig12%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig12%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF2j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF2j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF3j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF3j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT60%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT60%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT120%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT120%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_qmtop%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_qmtop%s01sigma")'
% (direction, direction))
return noTagVariables
def customizeTagSequence(self):
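        # Remove the default flashgg taggers and substitute the stage-1 combined tag for the untagged category.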
self.process.load('flashgg.Taggers.flashggStageOneCombinedTag_cfi')
self.process.flashggTagSequence.remove(self.process.
flashggVBFDiPhoDiJetMVA)
self.process.flashggTagSequence.remove(self.process.
flashggTTHDiLeptonTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHLeptonicTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHHadronicTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHLeptonicLooseTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHHadronicTag)
self.process.flashggTagSequence.remove(self.process.flashggVBFTag)
self.process.flashggTagSequence.replace(self.process.
flashggUntagged, self.process.flashggStageOneCombinedTag)
self.process.flashggStageOneCombinedTag.rawDiphoBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDiphoBounds'])
self.process.flashggStageOneCombinedTag.rawDijetBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDijetBounds'])
self.process.flashggStageOneCombinedTag.rawGghBounds = cms.vdouble(self
.metaConditions['stageOneCombinedTag']['rawGghBounds'])
self.process.flashggStageOneCombinedTag.rawVhHadBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawVhHadBounds'])
self.metaConditions['L1Prefiring']['applyToCentral'] = True
self.process.flashggTagSorter.TagPriorityRanges = (self.
tagPriorityRanges)
self.process.flashggTagSorter.isGluonFusion = cms.bool(bool(self.
customize.processId.count('ggh')))
self.process.flashggTagSorter.applyNNLOPSweight = cms.bool(self.
customize.applyNNLOPSweight)
self.process.flashggSystTagMerger = cms.EDProducer('TagMerger', src
=cms.VInputTag('flashggTagSorter'))
def modifyWorkflowForttH(self, systlabels, phosystlabels, metsystlabels,
jetsystlabels):
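        # Point the ttH taggers at the per-systematic collections and rebuild the sorter/merger chain for each systematic label.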
for tag in ['flashggTTHLeptonicTag', 'flashggTTHHadronicTag']:
getattr(self.process, tag).DiPhotonSuffixes = cms.vstring(
phosystlabels)
getattr(self.process, tag).JetsSuffixes = cms.vstring(jetsystlabels
)
getattr(self.process, tag).MetSuffixes = cms.vstring(metsystlabels)
getattr(self.process, tag).ModifySystematicsWorkflow = cms.bool(
True)
getattr(self.process, tag).UseLargeMVAs = cms.bool(True)
self.process.p.remove(self.process.flashggTagSorter)
self.process.p.replace(self.process.flashggSystTagMerger, cms.
Sequence(self.process.flashggTTHLeptonicTag + self.process.
flashggTTHHadronicTag) * self.process.flashggTagSorter * self.
process.flashggSystTagMerger)
for systlabel in systlabels:
if systlabel == '':
continue
self.process.p.remove(getattr(self.process, 'flashggTagSorter' +
systlabel))
self.process.p.replace(self.process.flashggSystTagMerger,
getattr(self.process, 'flashggTagSorter' + systlabel) *
self.process.flashggSystTagMerger)
modifiedPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.
InputTag('flashggTHQLeptonicTag' + systlabel)), cms.PSet(
TagName=cms.InputTag('flashggTTHLeptonicTag', systlabel)),
cms.PSet(TagName=cms.InputTag('flashggZHLeptonicTag' +
systlabel)), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag' + systlabel)), cms.PSet(TagName=cms.
InputTag('flashggTTHHadronicTag', systlabel)), cms.PSet(
TagName=cms.InputTag('flashggVHMetTag' + systlabel)), cms.
PSet(TagName=cms.InputTag('flashggStageOneCombinedTag' +
systlabel)))
setattr(getattr(self.process, 'flashggTagSorter' + systlabel),
'TagPriorityRanges', modifiedPriorityRanges)
|
import FWCore.ParameterSet.Config as cms
class StageOneCustomize:
"""
    Customization class for STXS stage 1 analysis
"""
def __init__(self, process, customize, metaConditions):
self.process = process
self.customize = customize
self.metaConditions = metaConditions
self.modifyForttH = True
self.tagList = [['LOGICERROR', 0], ['NOTAG', 0], [
'RECO_0J_PTH_0_10_Tag0', 0], ['RECO_0J_PTH_0_10_Tag1', 0], [
'RECO_0J_PTH_0_10_Tag2', 0], ['RECO_0J_PTH_GT10_Tag0', 0], [
'RECO_0J_PTH_GT10_Tag1', 0], ['RECO_0J_PTH_GT10_Tag2', 0], [
'RECO_1J_PTH_0_60_Tag0', 0], ['RECO_1J_PTH_0_60_Tag1', 0], [
'RECO_1J_PTH_0_60_Tag2', 0], ['RECO_1J_PTH_60_120_Tag0', 0], [
'RECO_1J_PTH_60_120_Tag1', 0], ['RECO_1J_PTH_60_120_Tag2', 0],
['RECO_1J_PTH_120_200_Tag0', 0], ['RECO_1J_PTH_120_200_Tag1', 0
], ['RECO_1J_PTH_120_200_Tag2', 0], ['RECO_GE2J_PTH_0_60_Tag0',
0], ['RECO_GE2J_PTH_0_60_Tag1', 0], ['RECO_GE2J_PTH_0_60_Tag2',
0], ['RECO_GE2J_PTH_60_120_Tag0', 0], [
'RECO_GE2J_PTH_60_120_Tag1', 0], ['RECO_GE2J_PTH_60_120_Tag2',
0], ['RECO_GE2J_PTH_120_200_Tag0', 0], [
'RECO_GE2J_PTH_120_200_Tag1', 0], ['RECO_GE2J_PTH_120_200_Tag2',
0], ['RECO_PTH_200_300_Tag0', 0], ['RECO_PTH_200_300_Tag1', 0],
['RECO_PTH_300_450_Tag0', 0], ['RECO_PTH_300_450_Tag1', 0], [
'RECO_PTH_450_650_Tag0', 0], ['RECO_PTH_GT650_Tag0', 0], [
'RECO_VBFTOPO_VHHAD_Tag0', 0], ['RECO_VBFTOPO_VHHAD_Tag1', 0],
['RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_LOWMJJ_Tag1', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag0', 0], [
'RECO_VBFTOPO_JET3_HIGHMJJ_Tag1', 0], ['RECO_VBFTOPO_BSM_Tag0',
0], ['RECO_VBFTOPO_BSM_Tag1', 0], ['RECO_VBFLIKEGGH_Tag0', 0],
['RECO_VBFLIKEGGH_Tag1', 0], ['RECO_TTH_HAD_PTH_0_60_Tag0', 0],
['RECO_TTH_HAD_PTH_0_60_Tag1', 0], [
'RECO_TTH_HAD_PTH_0_60_Tag2', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag0', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag1', 0], [
'RECO_TTH_HAD_PTH_60_120_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag0', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag1', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag2', 0], [
'RECO_TTH_HAD_PTH_120_200_Tag3', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag0', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag1', 0], [
'RECO_TTH_HAD_PTH_200_300_Tag2', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag0', 0], [
'RECO_TTH_HAD_PTH_GT300_Tag1', 0], ['RECO_WH_LEP_PTV_0_75_Tag0',
0], ['RECO_WH_LEP_PTV_0_75_Tag1', 0], [
'RECO_WH_LEP_PTV_75_150_Tag0', 0], [
'RECO_WH_LEP_PTV_75_150_Tag1', 0], [
'RECO_WH_LEP_PTV_GT150_Tag0', 0], ['RECO_ZH_LEP_Tag0', 0], [
'RECO_ZH_LEP_Tag1', 0], ['RECO_VH_MET_Tag0', 0], [
'RECO_VH_MET_Tag1', 0], ['RECO_VH_MET_Tag2', 0], [
'RECO_TTH_LEP_PTH_0_60_Tag0', 0], ['RECO_TTH_LEP_PTH_0_60_Tag1',
0], ['RECO_TTH_LEP_PTH_0_60_Tag2', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag0', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag1', 0], [
'RECO_TTH_LEP_PTH_60_120_Tag2', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag0', 0], [
'RECO_TTH_LEP_PTH_120_200_Tag1', 0], [
'RECO_TTH_LEP_PTH_200_300_Tag0', 0], [
'RECO_TTH_LEP_PTH_GT300_Tag0', 0], ['RECO_THQ_LEP', 0]]
if self.customize.processId == 'Data':
self.tagList.pop(1)
self.stageOneVariable = [
'stage1p2bin[57,-8.5,48.5] := tagTruth().HTXSstage1p2orderedBin']
self.tagPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.InputTag(
'flashggTHQLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggZHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag')), cms.PSet(TagName=cms.InputTag(
'flashggTTHHadronicTag')), cms.PSet(TagName=cms.InputTag(
'flashggVHMetTag')), cms.PSet(TagName=cms.InputTag(
'flashggStageOneCombinedTag')))
self.customizeTagSequence()
def variablesToDump(self):
ws_variables = []
ws_variables += self.stageOneVariable
ws_variables += ['CMS_hgg_mass[160,100,180]:=diPhoton().mass',
'dZ[40,-20.,20.]:=(tagTruth().genPV().z-diPhoton().vtx().z)',
'NNLOPSweight[1,-999999.,999999.] := tagTruth().weight("NNLOPSweight")'
,
'btagReshapeNorm_TTH_LEP[1,-999999.,999999.] := weight("btagReshapeNorm_TTH_LEP")'
,
'btagReshapeNorm_TTH_HAD[1,-999999.,999999.] := weight("btagReshapeNorm_TTH_HAD")'
,
'btagReshapeNorm_THQ_LEP[1,-999999.,999999.] := weight("btagReshapeNorm_THQ_LEP")'
, 'centralObjectWeight[1,-999999.,999999.] := centralWeight']
ntup_variables = ws_variables
if self.customize.dumpWorkspace:
return ws_variables
else:
return ntup_variables
def systematicVariables(self):
systematicVariables = []
systematicVariables += self.stageOneVariable
systematicVariables += ['CMS_hgg_mass[160,100,180]:=diPhoton().mass']
return systematicVariables
def noTagVariables(self):
noTagVariables = []
noTagVariables += self.stageOneVariable
for direction in ['Up', 'Down']:
noTagVariables.append(
'THU_ggH_Mu%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mu%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Res%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Res%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig01%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig01%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_Mig12%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_Mig12%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF2j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF2j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_VBF3j%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_VBF3j%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT60%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT60%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_PT120%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_PT120%s01sigma")'
% (direction, direction))
noTagVariables.append(
'THU_ggH_qmtop%s01sigma[1,-999999.,999999.] := getTheoryWeight("THU_ggH_qmtop%s01sigma")'
% (direction, direction))
return noTagVariables
def customizeTagSequence(self):
self.process.load('flashgg.Taggers.flashggStageOneCombinedTag_cfi')
self.process.flashggTagSequence.remove(self.process.
flashggVBFDiPhoDiJetMVA)
self.process.flashggTagSequence.remove(self.process.
flashggTTHDiLeptonTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHLeptonicTag)
self.process.flashggTagSequence.remove(self.process.
flashggTTHHadronicTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHLeptonicLooseTag)
self.process.flashggTagSequence.remove(self.process.
flashggVHHadronicTag)
self.process.flashggTagSequence.remove(self.process.flashggVBFTag)
self.process.flashggTagSequence.replace(self.process.
flashggUntagged, self.process.flashggStageOneCombinedTag)
self.process.flashggStageOneCombinedTag.rawDiphoBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDiphoBounds'])
self.process.flashggStageOneCombinedTag.rawDijetBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawDijetBounds'])
self.process.flashggStageOneCombinedTag.rawGghBounds = cms.vdouble(self
.metaConditions['stageOneCombinedTag']['rawGghBounds'])
self.process.flashggStageOneCombinedTag.rawVhHadBounds = cms.vdouble(
self.metaConditions['stageOneCombinedTag']['rawVhHadBounds'])
self.metaConditions['L1Prefiring']['applyToCentral'] = True
self.process.flashggTagSorter.TagPriorityRanges = (self.
tagPriorityRanges)
self.process.flashggTagSorter.isGluonFusion = cms.bool(bool(self.
customize.processId.count('ggh')))
self.process.flashggTagSorter.applyNNLOPSweight = cms.bool(self.
customize.applyNNLOPSweight)
self.process.flashggSystTagMerger = cms.EDProducer('TagMerger', src
=cms.VInputTag('flashggTagSorter'))
def modifyWorkflowForttH(self, systlabels, phosystlabels, metsystlabels,
jetsystlabels):
for tag in ['flashggTTHLeptonicTag', 'flashggTTHHadronicTag']:
getattr(self.process, tag).DiPhotonSuffixes = cms.vstring(
phosystlabels)
getattr(self.process, tag).JetsSuffixes = cms.vstring(jetsystlabels
)
getattr(self.process, tag).MetSuffixes = cms.vstring(metsystlabels)
getattr(self.process, tag).ModifySystematicsWorkflow = cms.bool(
True)
getattr(self.process, tag).UseLargeMVAs = cms.bool(True)
self.process.p.remove(self.process.flashggTagSorter)
self.process.p.replace(self.process.flashggSystTagMerger, cms.
Sequence(self.process.flashggTTHLeptonicTag + self.process.
flashggTTHHadronicTag) * self.process.flashggTagSorter * self.
process.flashggSystTagMerger)
for systlabel in systlabels:
if systlabel == '':
continue
self.process.p.remove(getattr(self.process, 'flashggTagSorter' +
systlabel))
self.process.p.replace(self.process.flashggSystTagMerger,
getattr(self.process, 'flashggTagSorter' + systlabel) *
self.process.flashggSystTagMerger)
modifiedPriorityRanges = cms.VPSet(cms.PSet(TagName=cms.
InputTag('flashggTHQLeptonicTag' + systlabel)), cms.PSet(
TagName=cms.InputTag('flashggTTHLeptonicTag', systlabel)),
cms.PSet(TagName=cms.InputTag('flashggZHLeptonicTag' +
systlabel)), cms.PSet(TagName=cms.InputTag(
'flashggWHLeptonicTag' + systlabel)), cms.PSet(TagName=cms.
InputTag('flashggTTHHadronicTag', systlabel)), cms.PSet(
TagName=cms.InputTag('flashggVHMetTag' + systlabel)), cms.
PSet(TagName=cms.InputTag('flashggStageOneCombinedTag' +
systlabel)))
setattr(getattr(self.process, 'flashggTagSorter' + systlabel),
'TagPriorityRanges', modifiedPriorityRanges)
|
import FWCore.ParameterSet.Config as cms
class StageOneCustomize():
"""
    Customization class for STXS stage 1 analysis
"""
def __init__(self, process, customize, metaConditions):
self.process = process
self.customize = customize
self.metaConditions = metaConditions
self.modifyForttH = True
self.tagList = [
["LOGICERROR",0], ["NOTAG",0],
["RECO_0J_PTH_0_10_Tag0",0], ["RECO_0J_PTH_0_10_Tag1",0], ["RECO_0J_PTH_0_10_Tag2",0],
["RECO_0J_PTH_GT10_Tag0",0], ["RECO_0J_PTH_GT10_Tag1",0],["RECO_0J_PTH_GT10_Tag2",0],
["RECO_1J_PTH_0_60_Tag0",0], ["RECO_1J_PTH_0_60_Tag1",0], ["RECO_1J_PTH_0_60_Tag2",0],
["RECO_1J_PTH_60_120_Tag0",0], ["RECO_1J_PTH_60_120_Tag1",0], ["RECO_1J_PTH_60_120_Tag2",0],
["RECO_1J_PTH_120_200_Tag0",0], ["RECO_1J_PTH_120_200_Tag1",0],["RECO_1J_PTH_120_200_Tag2",0],
["RECO_GE2J_PTH_0_60_Tag0",0], ["RECO_GE2J_PTH_0_60_Tag1",0], ["RECO_GE2J_PTH_0_60_Tag2",0],
["RECO_GE2J_PTH_60_120_Tag0",0], ["RECO_GE2J_PTH_60_120_Tag1",0], ["RECO_GE2J_PTH_60_120_Tag2",0],
["RECO_GE2J_PTH_120_200_Tag0",0], ["RECO_GE2J_PTH_120_200_Tag1",0], ["RECO_GE2J_PTH_120_200_Tag2",0],
["RECO_PTH_200_300_Tag0",0], ["RECO_PTH_200_300_Tag1",0], ["RECO_PTH_300_450_Tag0",0], ["RECO_PTH_300_450_Tag1",0],
["RECO_PTH_450_650_Tag0",0], ["RECO_PTH_GT650_Tag0",0],
["RECO_VBFTOPO_VHHAD_Tag0",0], ["RECO_VBFTOPO_VHHAD_Tag1",0],
["RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag0",0], ["RECO_VBFTOPO_JET3VETO_LOWMJJ_Tag1",0],
["RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag0",0], ["RECO_VBFTOPO_JET3VETO_HIGHMJJ_Tag1",0],
["RECO_VBFTOPO_JET3_LOWMJJ_Tag0",0], ["RECO_VBFTOPO_JET3_LOWMJJ_Tag1",0],
["RECO_VBFTOPO_JET3_HIGHMJJ_Tag0",0], ["RECO_VBFTOPO_JET3_HIGHMJJ_Tag1",0],
["RECO_VBFTOPO_BSM_Tag0",0], ["RECO_VBFTOPO_BSM_Tag1",0],
["RECO_VBFLIKEGGH_Tag0",0], ["RECO_VBFLIKEGGH_Tag1",0],
["RECO_TTH_HAD_PTH_0_60_Tag0",0], ["RECO_TTH_HAD_PTH_0_60_Tag1",0], ["RECO_TTH_HAD_PTH_0_60_Tag2",0],
["RECO_TTH_HAD_PTH_60_120_Tag0",0], ["RECO_TTH_HAD_PTH_60_120_Tag1",0], ["RECO_TTH_HAD_PTH_60_120_Tag2",0],
["RECO_TTH_HAD_PTH_120_200_Tag0",0], ["RECO_TTH_HAD_PTH_120_200_Tag1",0], ["RECO_TTH_HAD_PTH_120_200_Tag2",0], ["RECO_TTH_HAD_PTH_120_200_Tag3",0],
["RECO_TTH_HAD_PTH_200_300_Tag0",0], ["RECO_TTH_HAD_PTH_200_300_Tag1",0], ["RECO_TTH_HAD_PTH_200_300_Tag2",0],
["RECO_TTH_HAD_PTH_GT300_Tag0",0], ["RECO_TTH_HAD_PTH_GT300_Tag1",0],
["RECO_WH_LEP_PTV_0_75_Tag0",0], ["RECO_WH_LEP_PTV_0_75_Tag1",0],
["RECO_WH_LEP_PTV_75_150_Tag0",0], ["RECO_WH_LEP_PTV_75_150_Tag1",0],
["RECO_WH_LEP_PTV_GT150_Tag0",0],
["RECO_ZH_LEP_Tag0",0], ["RECO_ZH_LEP_Tag1",0],
["RECO_VH_MET_Tag0",0], ["RECO_VH_MET_Tag1",0], ["RECO_VH_MET_Tag2",0],
["RECO_TTH_LEP_PTH_0_60_Tag0",0], ["RECO_TTH_LEP_PTH_0_60_Tag1",0], ["RECO_TTH_LEP_PTH_0_60_Tag2",0],
["RECO_TTH_LEP_PTH_60_120_Tag0",0], ["RECO_TTH_LEP_PTH_60_120_Tag1",0], ["RECO_TTH_LEP_PTH_60_120_Tag2",0],
["RECO_TTH_LEP_PTH_120_200_Tag0",0], ["RECO_TTH_LEP_PTH_120_200_Tag1",0],
["RECO_TTH_LEP_PTH_200_300_Tag0",0],
["RECO_TTH_LEP_PTH_GT300_Tag0",0],
["RECO_THQ_LEP",0]
]
if self.customize.processId == "Data":
self.tagList.pop(1) ## remove NoTag for data
self.stageOneVariable = ["stage1p2bin[57,-8.5,48.5] := tagTruth().HTXSstage1p2orderedBin"]
self.tagPriorityRanges = cms.VPSet(
cms.PSet(TagName = cms.InputTag('flashggTHQLeptonicTag')),
cms.PSet(TagName = cms.InputTag('flashggTTHLeptonicTag')),
cms.PSet(TagName = cms.InputTag('flashggZHLeptonicTag')),
cms.PSet(TagName = cms.InputTag('flashggWHLeptonicTag')),
cms.PSet(TagName = cms.InputTag('flashggTTHHadronicTag')),
cms.PSet(TagName = cms.InputTag('flashggVHMetTag')),
cms.PSet(TagName = cms.InputTag('flashggStageOneCombinedTag'))
)
self.customizeTagSequence()
def variablesToDump(self):
ws_variables = []
ws_variables += self.stageOneVariable
ws_variables += [
"CMS_hgg_mass[160,100,180]:=diPhoton().mass",
"dZ[40,-20.,20.]:=(tagTruth().genPV().z-diPhoton().vtx().z)",
"NNLOPSweight[1,-999999.,999999.] := tagTruth().weight(\"NNLOPSweight\")",
"btagReshapeNorm_TTH_LEP[1,-999999.,999999.] := weight(\"btagReshapeNorm_TTH_LEP\")",
"btagReshapeNorm_TTH_HAD[1,-999999.,999999.] := weight(\"btagReshapeNorm_TTH_HAD\")",
"btagReshapeNorm_THQ_LEP[1,-999999.,999999.] := weight(\"btagReshapeNorm_THQ_LEP\")",
"centralObjectWeight[1,-999999.,999999.] := centralWeight"
]
ntup_variables = ws_variables #+ [
# "truthNNLOPS[1,-999999.,999999.]:=tagTruth().weight(\"NNLOPS\")",
# "leadJetPt[1,-999999.,999999.]:=VBFMVA().dijet_LeadJPt"
# ]
if self.customize.dumpWorkspace:
return ws_variables
else:
return ntup_variables
def systematicVariables(self):
systematicVariables = []
systematicVariables += self.stageOneVariable
systematicVariables += [
"CMS_hgg_mass[160,100,180]:=diPhoton().mass"
]
return systematicVariables
def noTagVariables(self):
noTagVariables = []
noTagVariables += self.stageOneVariable
for direction in ["Up","Down"]:
noTagVariables.append("THU_ggH_Mu%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_Mu%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_Res%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_Res%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_Mig01%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_Mig01%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_Mig12%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_Mig12%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_VBF2j%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_VBF2j%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_VBF3j%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_VBF3j%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_PT60%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_PT60%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_PT120%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_PT120%s01sigma\")" % (direction,direction))
noTagVariables.append("THU_ggH_qmtop%s01sigma[1,-999999.,999999.] := getTheoryWeight(\"THU_ggH_qmtop%s01sigma\")" % (direction,direction))
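            # (illustrative sketch, not original code) the nine appends above
            # could equally be generated from a list of the THU_ggH scheme names:
            #   for name in ['Mu', 'Res', 'Mig01', 'Mig12', 'VBF2j', 'VBF3j',
            #                'PT60', 'PT120', 'qmtop']:
            #       noTagVariables.append(
            #           'THU_ggH_%s%s01sigma[1,-999999.,999999.] := '
            #           'getTheoryWeight("THU_ggH_%s%s01sigma")'
            #           % (name, direction, name, direction))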
return noTagVariables
def customizeTagSequence(self):
self.process.load("flashgg.Taggers.flashggStageOneCombinedTag_cfi")
## remove unneeded tags
self.process.flashggTagSequence.remove(self.process.flashggVBFDiPhoDiJetMVA)
#self.process.flashggTagSequence.remove(self.process.flashggTHQLeptonicTag) ## now included in analysis
self.process.flashggTagSequence.remove(self.process.flashggTTHDiLeptonTag)
self.process.flashggTagSequence.remove(self.process.flashggTTHLeptonicTag) ## will be added back in later
self.process.flashggTagSequence.remove(self.process.flashggTTHHadronicTag) ## will be added back in later
#self.process.flashggTagSequence.remove(self.process.flashggVHMetTag) ## now included in analysis
#self.process.flashggTagSequence.remove(self.process.flashggZHLeptonicTag) ## now included in analysis
#self.process.flashggTagSequence.remove(self.process.flashggWHLeptonicTag) ## now included in analysis
self.process.flashggTagSequence.remove(self.process.flashggVHLeptonicLooseTag)
self.process.flashggTagSequence.remove(self.process.flashggVHHadronicTag)
self.process.flashggTagSequence.remove(self.process.flashggVBFTag)
self.process.flashggTagSequence.replace(self.process.flashggUntagged,self.process.flashggStageOneCombinedTag)
## customize from meta conditions - category thresholds set here
self.process.flashggStageOneCombinedTag.rawDiphoBounds = cms.vdouble( self.metaConditions["stageOneCombinedTag"]["rawDiphoBounds"] )
self.process.flashggStageOneCombinedTag.rawDijetBounds = cms.vdouble( self.metaConditions["stageOneCombinedTag"]["rawDijetBounds"] )
self.process.flashggStageOneCombinedTag.rawGghBounds = cms.vdouble( self.metaConditions["stageOneCombinedTag"]["rawGghBounds"] )
self.process.flashggStageOneCombinedTag.rawVhHadBounds = cms.vdouble( self.metaConditions["stageOneCombinedTag"]["rawVhHadBounds"] )
## set the pre-firing to be applied
self.metaConditions["L1Prefiring"]["applyToCentral"] = True
## set tag priorities
self.process.flashggTagSorter.TagPriorityRanges = self.tagPriorityRanges
self.process.flashggTagSorter.isGluonFusion = cms.bool(bool(self.customize.processId.count("ggh")))
self.process.flashggTagSorter.applyNNLOPSweight = cms.bool(self.customize.applyNNLOPSweight)
## set the tag merging
self.process.flashggSystTagMerger = cms.EDProducer("TagMerger",src=cms.VInputTag("flashggTagSorter"))
## this adds in the ttH tags with their correct, modified systematics workflow
def modifyWorkflowForttH(self, systlabels, phosystlabels, metsystlabels, jetsystlabels):
# Set lists of systematics for each tag
for tag in ["flashggTTHLeptonicTag", "flashggTTHHadronicTag"]:
getattr(self.process, tag).DiPhotonSuffixes = cms.vstring(phosystlabels)
getattr(self.process, tag).JetsSuffixes = cms.vstring(jetsystlabels)
getattr(self.process, tag).MetSuffixes = cms.vstring(metsystlabels)
getattr(self.process, tag).ModifySystematicsWorkflow = cms.bool(True)
getattr(self.process, tag).UseLargeMVAs = cms.bool(True) # enable memory-intensive MVAs
self.process.p.remove(self.process.flashggTagSorter)
self.process.p.replace(self.process.flashggSystTagMerger, cms.Sequence(self.process.flashggTTHLeptonicTag + self.process.flashggTTHHadronicTag)*self.process.flashggTagSorter*self.process.flashggSystTagMerger)
for systlabel in systlabels:
if systlabel == "":
continue
self.process.p.remove(getattr(self.process, 'flashggTagSorter' + systlabel))
self.process.p.replace(self.process.flashggSystTagMerger, getattr(self.process, 'flashggTagSorter' + systlabel) * self.process.flashggSystTagMerger)
modifiedPriorityRanges = cms.VPSet(
cms.PSet(TagName = cms.InputTag('flashggTHQLeptonicTag'+systlabel)),
cms.PSet(TagName = cms.InputTag('flashggTTHLeptonicTag', systlabel)),
cms.PSet(TagName = cms.InputTag('flashggZHLeptonicTag'+systlabel)),
cms.PSet(TagName = cms.InputTag('flashggWHLeptonicTag'+systlabel)),
cms.PSet(TagName = cms.InputTag('flashggTTHHadronicTag', systlabel)),
cms.PSet(TagName = cms.InputTag('flashggVHMetTag'+systlabel)),
cms.PSet(TagName = cms.InputTag('flashggStageOneCombinedTag'+systlabel))
)
setattr(getattr(self.process, 'flashggTagSorter'+systlabel), 'TagPriorityRanges', modifiedPriorityRanges)
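# A hypothetical instantiation sketch (not from the flashgg sources): the class
# above expects a cms.Process, a job-customization object exposing processId,
# dumpWorkspace and applyNNLOPSweight, and a metaConditions dict carrying the
# 'stageOneCombinedTag' boundaries and 'L1Prefiring' settings. Actually running
# it requires a full CMSSW + flashgg environment.
from types import SimpleNamespace

customize = SimpleNamespace(processId='ggh_125', dumpWorkspace=True,
                            applyNNLOPSweight=True)
metaConditions = {
    'stageOneCombinedTag': {'rawDiphoBounds': [], 'rawDijetBounds': [],
                            'rawGghBounds': [], 'rawVhHadBounds': []},
    'L1Prefiring': {'applyToCentral': False},
}
# stageOne = StageOneCustomize(process, customize, metaConditions)  # needs flashgg
# tagVars = stageOne.variablesToDump()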
|
[
6,
7,
8,
9,
10
] |
2,116 |
cc703690151acd17430b5a9715e71a694fdeca10
|
<mask token>
|
'''
Can you print numbers from 1 to 100 without using any loop?
'''
# Use Recursion
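# A minimal sketch of one possible answer (added for illustration, not part
# of the original snippet): recursion replaces the loop.
def print_numbers(n=1, limit=100):
    if n > limit:                 # base case ends the recursion
        return
    print(n)
    print_numbers(n + 1, limit)   # the recursive call advances the counter

print_numbers()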
| null | null | null |
[
0,
1
] |
2,117 |
ea3b8fe602357fa3d1de4daefce1e71a7de6e010
|
<mask token>
|
<mask token>
print(flags)
|
<mask token>
flags = [i for i in dir(cv2) if i.startswith('COLOR_')]
print(flags)
|
<mask token>
import cv2
flags = [i for i in dir(cv2) if i.startswith('COLOR_')]
print(flags)
|
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 31 16:55:33 2018
@author: GEAR
"""
'''
OpenCV provides more than 150 color-conversion methods, but in practice only
two are used frequently: BGR <-> Gray and BGR <-> HSV.
The function we need is cv2.cvtColor(input_image, flag), where flag selects
the conversion type. For BGR <-> Gray the flag is cv2.COLOR_BGR2GRAY. Note
that in OpenCV's HSV format, H (hue) takes values in [0, 179], while
S (saturation) and V (value) take values in [0, 255].
'''
import cv2
import cv2
flags = [i for i in dir(cv2) if i.startswith('COLOR_')]
print(flags)
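# A minimal usage sketch (added for illustration; assumes an image file
# 'input.jpg' exists next to the script):
import cv2
img = cv2.imread('input.jpg')                 # OpenCV reads images in BGR order
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # BGR -> grayscale
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)    # BGR -> HSV (H in [0, 179])
print(gray.shape, hsv.shape)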
|
[
0,
1,
2,
3,
4
] |
2,118 |
d7876a078af8572e44b4eb16f3ec0898db73724d
|
<mask token>
|
<mask token>
for index, elements in enumerate(a):
if elements == 5:
b.append(index)
print(b)
|
a = 5, 1, 3, 5, 3, 1, 0, 9, 5, 3, 8, 6, 5, 7
b = []
for index, elements in enumerate(a):
if elements == 5:
b.append(index)
print(b)
|
a = (5, 1, 3, 5, 3, 1, 0, 9, 5, 3, 8, 6, 5, 7)
b = []
for index, elements in enumerate(a):
if elements == 5:
b.append(index)
print(b)
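# For comparison (added for illustration): a list comprehension yields the
# same indices in a single expression.
a = (5, 1, 3, 5, 3, 1, 0, 9, 5, 3, 8, 6, 5, 7)
b = [index for index, element in enumerate(a) if element == 5]
print(b)  # -> [0, 3, 8, 12]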
| null |
[
0,
1,
2,
3
] |
2,119 |
977841e0bb73cec879fbb1868f1e64102c6d8c1a
|
<mask token>
|
<mask token>
for topic in topics:
i += 1
arts = os.listdir(os.path.join(path, topic))
j = 0
for art in arts:
j += 1
with open(os.path.join(path, topic, art), encoding='UTF-8') as f:
lines = f.read()
soup = BeautifulSoup(lines, 'html.parser')
for text in soup.find_all('p'):
text = text.get_text()
filters = '!"\'#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
translate_dict = dict((c, ' ') for c in filters)
translate_map = str.maketrans(translate_dict)
text = text.translate(translate_map)
tokens = word_tokenize(text)
lines = [str(i)] + [str(j)] + tokens
if len(tokens) == 0:
break
else:
data.append(' '.join(lines))
if True:
random.shuffle(data)
num_samples = len(data)
for split, ratio in ratios:
with open(os.path.join(root, '%s.txt' % split), 'w') as f:
length = int(num_samples * ratio)
f.write('\n'.join(data[:length]))
data = data[length:]
print('Building vocabulary from DUC data')
counter = Counter()
with open(os.path.join(root, 'train.txt')) as f:
for line in f:
words = line.strip().lower().split()[:max_len]
counter.update(words)
word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}
vocab = [word for word, freq in counter.most_common() if freq > 5]
for word in vocab[:vocab_size - 2]:
word_to_idx[word] = len(word_to_idx)
print('Vocabulary size: %d' % (len(word_to_idx) - 2))
save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))
splits = ['train', 'valid', 'test']
num_sents, num_words = 0, 0
func = lambda seq: np.array([word_to_idx.get(symbol, word_to_idx[
'<unk>']) for symbol in seq])
for split in splits:
print('Creating %s DUC data' % split)
data = []
with open(os.path.join(root, '%s.txt' % split)) as f:
for line in f:
words = line.strip().lower().split()[:max_len + 2]
topic, art, words = int(words[0]), int(words[1]), words[2:]
length = len(words)
paddings = ['<pad>'] * (max_len - length)
enc_input = func(words + paddings)
dec_input = func(['<bos>'] + words + paddings)
target = func(words + ['<eos>'] + paddings)
data.append((enc_input, dec_input, target, length, topic))
num_words += length
print('%s samples: %d' % (split.capitalize(), len(data)))
save_pickle(data, os.path.join(root, '%s.pkl' % split))
num_sents += len(data)
print('Average length: %.2f' % (num_words / num_sents))
|
<mask token>
root = 'data'
ratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]
max_len = 64
vocab_size = 16000
data = []
path = os.path.join(root, 'main')
topics = os.listdir(path)
i = 0
for topic in topics:
i += 1
arts = os.listdir(os.path.join(path, topic))
j = 0
for art in arts:
j += 1
with open(os.path.join(path, topic, art), encoding='UTF-8') as f:
lines = f.read()
soup = BeautifulSoup(lines, 'html.parser')
for text in soup.find_all('p'):
text = text.get_text()
filters = '!"\'#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
translate_dict = dict((c, ' ') for c in filters)
translate_map = str.maketrans(translate_dict)
text = text.translate(translate_map)
tokens = word_tokenize(text)
lines = [str(i)] + [str(j)] + tokens
if len(tokens) == 0:
break
else:
data.append(' '.join(lines))
if True:
random.shuffle(data)
num_samples = len(data)
for split, ratio in ratios:
with open(os.path.join(root, '%s.txt' % split), 'w') as f:
length = int(num_samples * ratio)
f.write('\n'.join(data[:length]))
data = data[length:]
print('Building vocabulary from DUC data')
counter = Counter()
with open(os.path.join(root, 'train.txt')) as f:
for line in f:
words = line.strip().lower().split()[:max_len]
counter.update(words)
word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}
vocab = [word for word, freq in counter.most_common() if freq > 5]
for word in vocab[:vocab_size - 2]:
word_to_idx[word] = len(word_to_idx)
print('Vocabulary size: %d' % (len(word_to_idx) - 2))
save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))
splits = ['train', 'valid', 'test']
num_sents, num_words = 0, 0
func = lambda seq: np.array([word_to_idx.get(symbol, word_to_idx[
'<unk>']) for symbol in seq])
for split in splits:
print('Creating %s DUC data' % split)
data = []
with open(os.path.join(root, '%s.txt' % split)) as f:
for line in f:
words = line.strip().lower().split()[:max_len + 2]
topic, art, words = int(words[0]), int(words[1]), words[2:]
length = len(words)
paddings = ['<pad>'] * (max_len - length)
enc_input = func(words + paddings)
dec_input = func(['<bos>'] + words + paddings)
target = func(words + ['<eos>'] + paddings)
data.append((enc_input, dec_input, target, length, topic))
num_words += length
print('%s samples: %d' % (split.capitalize(), len(data)))
save_pickle(data, os.path.join(root, '%s.pkl' % split))
num_sents += len(data)
print('Average length: %.2f' % (num_words / num_sents))
|
import requests
import os
import numpy as np
from bs4 import BeautifulSoup
from nltk import word_tokenize
from collections import Counter
import random
from utils import save_pickle
root = 'data'
ratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]
max_len = 64
vocab_size = 16000
data = []
path = os.path.join(root, 'main')
topics = os.listdir(path)
i = 0
for topic in topics:
i += 1
arts = os.listdir(os.path.join(path, topic))
j = 0
for art in arts:
j += 1
with open(os.path.join(path, topic, art), encoding='UTF-8') as f:
lines = f.read()
soup = BeautifulSoup(lines, 'html.parser')
for text in soup.find_all('p'):
text = text.get_text()
filters = '!"\'#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
translate_dict = dict((c, ' ') for c in filters)
translate_map = str.maketrans(translate_dict)
text = text.translate(translate_map)
tokens = word_tokenize(text)
lines = [str(i)] + [str(j)] + tokens
if len(tokens) == 0:
break
else:
data.append(' '.join(lines))
if True:
random.shuffle(data)
num_samples = len(data)
for split, ratio in ratios:
with open(os.path.join(root, '%s.txt' % split), 'w') as f:
length = int(num_samples * ratio)
f.write('\n'.join(data[:length]))
data = data[length:]
print('Building vocabulary from DUC data')
counter = Counter()
with open(os.path.join(root, 'train.txt')) as f:
for line in f:
words = line.strip().lower().split()[:max_len]
counter.update(words)
word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}
vocab = [word for word, freq in counter.most_common() if freq > 5]
for word in vocab[:vocab_size - 2]:
word_to_idx[word] = len(word_to_idx)
print('Vocabulary size: %d' % (len(word_to_idx) - 2))
save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))
splits = ['train', 'valid', 'test']
num_sents, num_words = 0, 0
func = lambda seq: np.array([word_to_idx.get(symbol, word_to_idx[
'<unk>']) for symbol in seq])
for split in splits:
print('Creating %s DUC data' % split)
data = []
with open(os.path.join(root, '%s.txt' % split)) as f:
for line in f:
words = line.strip().lower().split()[:max_len + 2]
topic, art, words = int(words[0]), int(words[1]), words[2:]
length = len(words)
paddings = ['<pad>'] * (max_len - length)
enc_input = func(words + paddings)
dec_input = func(['<bos>'] + words + paddings)
target = func(words + ['<eos>'] + paddings)
data.append((enc_input, dec_input, target, length, topic))
num_words += length
print('%s samples: %d' % (split.capitalize(), len(data)))
save_pickle(data, os.path.join(root, '%s.pkl' % split))
num_sents += len(data)
print('Average length: %.2f' % (num_words / num_sents))
|
import requests
import os
import numpy as np
from bs4 import BeautifulSoup
from nltk import word_tokenize
from collections import Counter
import random
from utils import save_pickle
root = 'data'
ratios = [('train', 0.85), ('valid', 0.05), ('test', 0.1)]
max_len = 64
vocab_size = 16000
data = []
path = os.path.join(root,'main')
topics = os.listdir(path)
i = 0
for topic in topics:
i += 1
arts = os.listdir(os.path.join(path,topic))
j = 0
for art in arts:
j += 1
with open(os.path.join(path,topic,art),encoding='UTF-8') as f:
#lines = unicode(f.read(), errors='ignore')
lines = f.read()
#print(type(lines))
#print(i,j)
soup = BeautifulSoup(lines, 'html.parser')
for text in soup.find_all('p'):
# replace punctuation characters with spaces
text = text.get_text()
filters = '!"\'#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'
translate_dict = dict((c, " ") for c in filters)
translate_map = str.maketrans(translate_dict)
text = text.translate(translate_map)
tokens = word_tokenize(text)
lines = [str(i)] + [str(j)] + tokens
if(len(tokens)==0):
break
else:
data.append(' '.join(lines))
if True:
random.shuffle(data)
num_samples = len(data)
for split, ratio in ratios:
with open(os.path.join(root, "%s.txt"%split), 'w') as f:
length = int(num_samples * ratio)
f.write('\n'.join(data[:length]))
data = data[length:]
print("Building vocabulary from DUC data")
counter = Counter()
with open(os.path.join(root, 'train.txt')) as f:
for line in f:
words = line.strip().lower().split()[:max_len]
counter.update(words)
word_to_idx = {'<pad>': 0, '<unk>': 1, '<bos>': 2, '<eos>': 3}
vocab = [word for word, freq in counter.most_common() if freq > 5]
for word in vocab[:vocab_size - 2]:
word_to_idx[word] = len(word_to_idx)
# exclude <bos> and <pad> symbols
print("Vocabulary size: %d" % (len(word_to_idx) - 2))
save_pickle(word_to_idx, os.path.join(root, 'vocab.pkl'))
splits = ['train', 'valid', 'test']
num_sents, num_words = 0, 0
func = lambda seq: np.array([
word_to_idx.get(symbol, word_to_idx['<unk>']) for symbol in seq])
for split in splits:
print("Creating %s DUC data" % split)
data = []
with open(os.path.join(root, "%s.txt" % split)) as f:
for line in f:
words = line.strip().lower().split()[:max_len + 2]
topic, art, words = int(words[0]), int(words[1]), words[2:] ###
length = len(words)
paddings = ['<pad>'] * (max_len - length)
enc_input = func(words + paddings)
dec_input = func(['<bos>'] + words + paddings)
target = func(words + ['<eos>'] + paddings)
data.append((enc_input, dec_input, target, length, topic)) ###
num_words += length
print("%s samples: %d" %(split.capitalize(), len(data)))
save_pickle(data, os.path.join(root, "%s.pkl" % split))
num_sents += len(data)
print("Average length: %.2f" %(num_words / num_sents))
|
[
0,
1,
2,
3,
4
] |
2,120 |
cc924892afe179e55166ea9b237b2bfe8ea900df
|
<mask token>
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
<mask token>
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
<mask token>
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
label.image = photo
<mask token>
def checkwin():
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
<mask token>
|
<mask token>
root.title('Tic-Tac-Toe')
root.geometry('600x600')
<mask token>
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
<mask token>
global connection_established
<mask token>
sock.connect((HOST, PORT))
<mask token>
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
label.image = photo
<mask token>
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand=YES)
root.after(5000, lambda : root.destroy())
root.mainloop()
<mask token>
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
<mask token>
def checkwin():
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
<mask token>
b1.grid(row=0, column=0)
<mask token>
b2.grid(row=0, column=1)
<mask token>
b3.grid(row=0, column=2)
<mask token>
b4.grid(row=1, column=0)
<mask token>
b5.grid(row=1, column=1)
<mask token>
b6.grid(row=1, column=2)
<mask token>
b7.grid(row=2, column=0)
<mask token>
b8.grid(row=2, column=1)
<mask token>
b9.grid(row=2, column=2)
<mask token>
labels.grid(row=3, column=0)
for w in New.winfo_children():
w.configure(state='disabled')
New.mainloop()
|
<mask token>
root = Tk()
root.title('Tic-Tac-Toe')
root.geometry('600x600')
winner = False
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
HOST = '127.0.0.1'
PORT = 65432
global connection_established
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
connection_established = True
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
label.image = photo
image = Image.open('C:\\Users\\User\\Any_Path\\Tic-tac-toe1.png')
copy_of_image = image.copy()
photo = ImageTk.PhotoImage(image)
label = ttk.Label(root, image=photo)
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand=YES)
root.after(5000, lambda : root.destroy())
root.mainloop()
New = Tk()
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
clicked = 'Y'
def checkwin():
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
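    # str(b) is the Tk widget path name (e.g. '.!button2'); stripping '.'
    # and '!' leaves 'button2', the token the peer matches in receive_data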
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
b1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b1))
b1.grid(row=0, column=0)
b2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b2))
b2.grid(row=0, column=1)
b3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b3))
b3.grid(row=0, column=2)
b4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b4))
b4.grid(row=1, column=0)
b5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b5))
b5.grid(row=1, column=1)
b6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b6))
b6.grid(row=1, column=2)
b7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b7))
b7.grid(row=2, column=0)
b8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b8))
b8.grid(row=2, column=1)
b9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b9))
b9.grid(row=2, column=2)
labels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',
height=2, justify='center')
labels.grid(row=3, column=0)
for w in New.winfo_children():
w.configure(state='disabled')
New.mainloop()
|
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from PIL import Image, ImageTk
import time
import socket
import threading
root = Tk()
root.title('Tic-Tac-Toe')
root.geometry('600x600')
winner = False
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
HOST = '127.0.0.1'
PORT = 65432
global connection_established
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
connection_established = True
def receive_data():
while True:
data = sock.recv(1024).decode()
print('decoded is', data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b1.config(state='disabled')
elif data == 'button2':
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b2.config(state='disabled')
elif data == 'button3':
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b3.config(state='disabled')
elif data == 'button4':
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b4.config(state='disabled')
elif data == 'button5':
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b5.config(state='disabled')
elif data == 'button6':
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b6.config(state='disabled')
elif data == 'button7':
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b7.config(state='disabled')
elif data == 'button8':
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b8.config(state='disabled')
elif data == 'button9':
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state='normal')
b9.config(state='disabled')
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image=photo)
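    # keep a reference so the PhotoImage is not garbage-collected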
label.image = photo
image = Image.open('C:\\Users\\User\\Any_Path\\Tic-tac-toe1.png')
copy_of_image = image.copy()
photo = ImageTk.PhotoImage(image)
label = ttk.Label(root, image=photo)
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand=YES)
root.after(5000, lambda : root.destroy())
root.mainloop()
New = Tk()
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
clicked = 'Y'
def checkwin():
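    # scan all 8 winning lines (3 rows, 3 columns, 2 diagonals), first for X, then for O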
global winner
winner = False
if b1['text'] == 'X' and b2['text'] == 'X' and b3['text'] == 'X':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b4['text'] == 'X' and b5['text'] == 'X' and b6['text'] == 'X':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b7['text'] == 'X' and b8['text'] == 'X' and b9['text'] == 'X':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b4['text'] == 'X' and b7['text'] == 'X':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b2['text'] == 'X' and b5['text'] == 'X' and b8['text'] == 'X':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b6['text'] == 'X' and b9['text'] == 'X':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'X' and b5['text'] == 'X' and b9['text'] == 'X':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b3['text'] == 'X' and b5['text'] == 'X' and b7['text'] == 'X':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!X Wins!!!!!!!!')
elif b1['text'] == 'O' and b2['text'] == 'O' and b3['text'] == 'O':
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b4['text'] == 'O' and b5['text'] == 'O' and b6['text'] == 'O':
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b7['text'] == 'O' and b8['text'] == 'O' and b9['text'] == 'O':
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b4['text'] == 'O' and b7['text'] == 'O':
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b2['text'] == 'O' and b5['text'] == 'O' and b8['text'] == 'O':
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b6['text'] == 'O' and b9['text'] == 'O':
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b1['text'] == 'O' and b5['text'] == 'O' and b9['text'] == 'O':
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
elif b3['text'] == 'O' and b5['text'] == 'O' and b7['text'] == 'O':
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state='disabled')
messagebox.showinfo('Winner', 'Congo!!!!!!!O Wins!!!!!!!!')
def b_click(b):
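    # str(b) is the Tk widget path (e.g. '.!button2'; the first button is just
    # '.!button'), so stripping '.' and '!' yields the name sent to the opponent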
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b['text'] == '' and b['state'] != 'disabled':
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state='disabled')
b1 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b1))
b1.grid(row=0, column=0)
b2 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b2))
b2.grid(row=0, column=1)
b3 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b3))
b3.grid(row=0, column=2)
b4 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b4))
b4.grid(row=1, column=0)
b5 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b5))
b5.grid(row=1, column=1)
b6 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b6))
b6.grid(row=1, column=2)
b7 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b7))
b7.grid(row=2, column=0)
b8 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b8))
b8.grid(row=2, column=1)
b9 = Button(New, text='', font=('Verdana', 20), height=3, width=6, bg=
'SystemButtonFace', command=lambda : b_click(b9))
b9.grid(row=2, column=2)
labels = Label(New, fg='white', bg='black', pady=1, text='Opponent Turn ',
height=2, justify='center')
labels.grid(row=3, column=0)
for w in New.winfo_children():
w.configure(state='disabled')
New.mainloop()
|
from tkinter import *
from tkinter import messagebox
from tkinter import ttk
from PIL import Image, ImageTk
import time
import socket
import threading
root = Tk()
root.title("Tic-Tac-Toe")
root.geometry('600x600')
winner = False
def start_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
HOST = '127.0.0.1'
PORT = 65432
global connection_established
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
connection_established = True
def receive_data():
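    # each message from the opponent names the button they played as X;
    # mark it, re-enable the board for our move, and lock that square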
while True:
data = sock.recv(1024).decode()
print('decoded is',data)
if data == 'button':
labels.config(text="My Turn or O's Turn")
b1.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b1.config(state="disabled")
elif data == 'button2' :
labels.config(text="My Turn or O's Turn")
b2.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b2.config(state="disabled")
elif data == 'button3' :
labels.config(text="My Turn or O's Turn")
b3.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b3.config(state="disabled")
elif data == 'button4' :
labels.config(text="My Turn or O's Turn")
b4.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b4.config(state="disabled")
elif data == 'button5' :
labels.config(text="My Turn or O's Turn")
b5.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b5.config(state="disabled")
elif data == 'button6' :
labels.config(text="My Turn or O's Turn")
b6.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b6.config(state="disabled")
elif data == 'button7' :
labels.config(text="My Turn or O's Turn")
b7.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b7.config(state="disabled")
elif data == 'button8' :
labels.config(text="My Turn or O's Turn")
b8.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b8.config(state="disabled")
elif data == 'button9' :
labels.config(text="My Turn or O's Turn")
b9.config(text='X')
for w in New.winfo_children():
w.configure(state="normal")
b9.config(state="disabled")
start_thread(receive_data)
def resize_image(event):
new_width = event.width
new_height = event.height
image = copy_of_image.resize((new_width, new_height))
photo = ImageTk.PhotoImage(image)
label.config(image = photo)
label.image = photo #avoid garbage collection
image = Image.open('C:\\Users\\User\\Any_Path\\Tic-tac-toe1.png')
copy_of_image = image.copy()
photo = ImageTk.PhotoImage(image)
label = ttk.Label(root, image = photo)
label.bind('<Configure>', resize_image)
label.pack(fill=BOTH, expand = YES)
root.after(5000, lambda: root.destroy()) # Destroy the splash window after 5 seconds
root.mainloop()
New = Tk()
New.title('Tic-Tac-Toe')
New.iconbitmap('C:/Users/jainh/Downloads/Tic-tac-toe1.png')
clicked = 'Y'
def checkwin():
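    # scan all 8 winning lines (3 rows, 3 columns, 2 diagonals), first for X, then for O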
global winner
winner = False
if b1["text"] == "X" and b2["text"] == "X" and b3["text"] == "X":
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b4["text"] == "X" and b5["text"] == "X" and b6["text"] == "X":
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b7["text"] == "X" and b8["text"] == "X" and b9["text"] == "X":
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b1["text"] == "X" and b4["text"] == "X" and b7["text"] == "X":
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b2["text"] == "X" and b5["text"] == "X" and b8["text"] == "X":
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b3["text"] == "X" and b6["text"] == "X" and b9["text"] == "X":
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b1["text"] == "X" and b5["text"] == "X" and b9["text"] == "X":
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
elif b3["text"] == "X" and b5["text"] == "X" and b7["text"] == "X":
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!X Wins!!!!!!!!")
###################################
elif b1["text"] == "O" and b2["text"] == "O" and b3["text"] == "O":
b1.config(bg='green')
b2.config(bg='green')
b3.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b4["text"] == "O" and b5["text"] == "O" and b6["text"] == "O":
b4.config(bg='green')
b5.config(bg='green')
b6.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b7["text"] == "O" and b8["text"] == "O" and b9["text"] == "O":
b7.config(bg='green')
b8.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b1["text"] == "O" and b4["text"] == "O" and b7["text"] == "O":
b1.config(bg='green')
b4.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b2["text"] == "O" and b5["text"] == "O" and b8["text"] == "O":
b2.config(bg='green')
b5.config(bg='green')
b8.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b3["text"] == "O" and b6["text"] == "O" and b9["text"] == "O":
b3.config(bg='green')
b6.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
elif b1["text"] == "O" and b5["text"] == "O" and b9["text"] == "O":
b1.config(bg='green')
b5.config(bg='green')
b9.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo('Winner',"Congo!!!!!!!O Wins!!!!!!!!")
elif b3["text"] == "O" and b5["text"] == "O" and b7["text"] == "O":
b3.config(bg='green')
b5.config(bg='green')
b7.config(bg='green')
winner = True
for w in New.winfo_children():
w.configure(state="disabled")
messagebox.showinfo("Winner","Congo!!!!!!!O Wins!!!!!!!!")
def b_click(b):
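    # str(b) is the Tk widget path (e.g. '.!button2'; the first button is just
    # '.!button'), so stripping '.' and '!' yields the name sent to the opponent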
to_send = str(b)
to_send = to_send.replace('.', '')
to_send = str(to_send.replace('!', ''))
print(to_send)
global clicked
if b["text"] == '' and b['state'] != 'disabled' :
labels.config(text="X's Turn")
b.configure(state=DISABLED)
b['text'] = 'O'
checkwin()
if connection_established == True:
sock.send(to_send.encode())
for w in New.winfo_children():
w.configure(state="disabled")
b1 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b1))
b1.grid(row=0,column=0)
b2 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b2))
b2.grid(row=0,column=1)
b3 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b3))
b3.grid(row=0,column=2)
b4 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b4))
b4.grid(row=1,column=0)
b5 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b5))
b5.grid(row=1,column=1)
b6 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b6))
b6.grid(row=1,column=2)
b7 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b7))
b7.grid(row=2,column=0)
b8 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b8))
b8.grid(row=2,column=1)
b9 = Button(New, text='',font=('Verdana',20),height=3,width=6,bg="SystemButtonFace",command=lambda:b_click(b9))
b9.grid(row=2,column=2)
labels = Label(New, fg="white",bg="black", pady=1,text="Opponent Turn ",height=2,justify="center")
labels.grid(row=3,column=0)
for w in New.winfo_children():
w.configure(state="disabled")
#menu = Menu(New)
#New.config(menu=menu)
#options = Menu(menu,tearoff=False)
New.mainloop()
|
[
5,
6,
7,
8,
9
] |
2,121 |
2af590ad11704ecf21489a5d546e61f40dcceee6
|
<mask token>
|
<mask token>
admin.site.register(Pack)
admin.site.register(Cliente)
|
from django.contrib import admin
from .models import Cliente, Pack
admin.site.register(Pack)
admin.site.register(Cliente)
|
from django.contrib import admin
from .models import Cliente, Pack
# Register your models here.
admin.site.register(Pack)
admin.site.register(Cliente)
| null |
[
0,
1,
2,
3
] |
2,122 |
92317996f884befd646138cd3a3dc3f8345679f4
|
<mask token>
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
    Run the SIR simulation for the population and plot the susceptible/infected/removed fractions over time.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
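        # spatial index over the current positions for fast radius queries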
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
<mask token>
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
    Run one simulation and return the fraction ever infected and the fraction still infected.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
    Plot how the total infected fraction changes with p when the infection starts from the center.
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
    plt.xlabel('p')
    plt.ylabel('total fraction of individuals infected')
    plt.title('Total Fraction of Individuals Infected vs p')
plt.show()
<mask token>
|
<mask token>
sys.path.append('../')
<mask token>
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
    Run the SIR simulation for the population and plot the susceptible/infected/removed fractions over time.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
run_Simulation2(0.6, N=20000, T=30, start=10)
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
    Run one simulation and return the fraction ever infected and the fraction still infected.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
    Plot how the total infected fraction changes with p when the infection starts from the center.
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
    plt.xlabel('p')
    plt.ylabel('total fraction of individuals infected')
    plt.title('Total Fraction of Individuals Infected vs p')
plt.show()
plotcenterrange()
<mask token>
print('p = 0.05, starting randomly, the total infected fraction is ' + str(
    valuerandom))
print('p = 0.05, starting from corner, the total infected fraction is ' +
    str(valuecorner))
print('p = 0.05, starting from center, the total infected fraction is ' +
    str(valuecenter))
|
<mask token>
sys.path.append('../')
<mask token>
p = Person()
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
    Run the SIR simulation for the population and plot the susceptible/infected/removed fractions over time.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
run_Simulation2(0.6, N=20000, T=30, start=10)
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
    Run one simulation and return the fraction ever infected and the fraction still infected.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
    Plot how the total infected fraction changes with p when the infection starts from the center.
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
    plt.xlabel('p')
    plt.ylabel('total fraction of individuals infected')
    plt.title('Total Fraction of Individuals Infected vs p')
plt.show()
plotcenterrange()
<mask token>
valuecorner = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcorner=True)[0]
valuecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcenter=True)[0]
valuerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)))[0]
print('p = 0.05, starting randomly, the total infected fraction is ' + str(
    valuerandom))
print('p = 0.05, starting from corner, the total infected fraction is ' +
    str(valuecorner))
print('p = 0.05, starting from center, the total infected fraction is ' +
    str(valuecenter))
|
import sys
import os
import numpy as np
import math
sys.path.append('../')
from sir.improveagent import *
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from scipy.spatial import KDTree
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist
import networkx as nx
p = Person()
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
    Run the SIR simulation for the population and plot the susceptible/infected/removed fractions over time.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
run_Simulation2(0.6, N=20000, T=30, start=10)
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
    Run one simulation and return the fraction ever infected and the fraction still infected.
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
    Plot how the total infected fraction changes with p when the infection starts from the center.
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
    plt.xlabel('p')
    plt.ylabel('total fraction of individuals infected')
    plt.title('Total Fraction of Individuals Infected vs p')
plt.show()
plotcenterrange()
<mask token>
valuecorner = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcorner=True)[0]
valuecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcenter=True)[0]
valuerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)))[0]
print('p = 0.05, starting randomly, the total infected fraction is ' + str(
    valuerandom))
print('p = 0.05, starting from corner, the total infected fraction is ' +
    str(valuecorner))
print('p = 0.05, starting from center, the total infected fraction is ' +
    str(valuecenter))
|
import sys
import os
import numpy as np
import math
sys.path.append("../")
from sir.improveagent import *
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
#from sklearn.neighbors import BallTree
from scipy.spatial import KDTree
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist
import networkx as nx
p = Person()
def run_Simulation2(k,N=100,T=10,start = 1,p=0.5,q=0.08,startcenter = False,startcorner=False):
"""
    Run the SIR simulation for the population and plot the susceptible/infected/removed fractions over time.
"""
recover = [0]
infect = [start]
suspect = [N-start]
pop = [Person() for i in range(N)]
##we need to change the code for the case start people infected
for i in range(start):
        pop[i].get_infected()
    if startcenter:
        resetcenter(start,pop)
    if startcorner:
        resetcorner(start,pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
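        # spatial index over the current positions for fast radius queries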
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
#may have problem here
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand()< k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [i/N for i in recover]
newsuspect = [s/N for s in suspect]
newinfect = [i/N for i in infect]
plt.plot(range(T+1),newrecover,label = "r: percentage of removed ")
plt.plot(range(T+1),newsuspect,label = "s: percentage of susceptible")
plt.plot(range(T+1),newinfect,label = "i: percentage of infected")
plt.xlabel("T")
plt.ylabel("percentage")
plt.title("Percentage of Population, Discrete")
plt.legend()
plt.show()
#We run a simulation here,use the default value of p and q
run_Simulation2(0.6,N=20000,T = 30,start=10)
def checkinfectb(k,N,T,start=1,p=0.5,q=0.08,startcenter = False,startcorner=False):
"""
    Run one simulation and return the fraction ever infected and the fraction still infected.
"""
recover = [0]
infect = [start]
suspect = [N-start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
        pop[i].get_infected()
    if startcenter:
        resetcenter(start,pop)
    if startcorner:
        resetcorner(start,pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand()<k:
pop[j].get_recovered()
return np.array([(count_infect(pop)+count_recover(pop))/N,count_infect(pop)/N])
def plotcenterrange():
"""
    Plot how the total infected fraction changes with p when the infection starts from the center.
"""
plist1 = np.arange(0.02,0.1,0.02)
plist = np.arange(0.1,1,0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])
plt.plot(np.hstack((plist1,plist)),infectlist)
plt.title("centerplot")
plt.xlabel("p")
plt.ylabel("total number of individuals infected")
plt.title("Total Number of Individuals Infected vs p")
plt.show()
plotcenterrange()
"""
def plotrandomcornerrange():
plist1 = np.arange(0.02,0.1,0.02)
plist = np.arange(0.1,1,0.1)
infectlist = []
infectlist2 = []
infectlist3 = []
for i in plist1:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])
infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])
infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])
infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])
infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])
plt.plot(np.hstack((plist1,plist)),infectlist,label = "corner")
plt.plot(np.hstack((plist1,plist)),infectlist2,label = "random")
plt.plot(np.hstack((plist1,plist)),infectlist3,label = "center")
plt.title("Change from random corner center")
plt.xlabel("change of p")
plt.ylabel("change of total infected people")
plt.legend()
plt.show()
"""
#plotrandomcornerrange()
#no need for us to use this function
valuecorner = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0]
valuecenter = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0]
valuerandom = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)))[0]
print("p = 0.05, starting randomly, the total infected number is "+ str(valuerandom))
print("p = 0.05, starting from corner, the total infected number is "+ str(valuecorner))
print("p = 0.05, starting from center, the total infected number is "+ str(valuecenter))
|
[
3,
4,
5,
6,
7
] |
2,123 |
cffcfa08cd919f93dfe2ab8dc676efc76feafab3
|
<mask token>
def create_axes(length, both=False, text=False, font=_glut.
GLUT_BITMAP_HELVETICA_18):
"""
Create axes system.
:param length: Axes length
:param both: Both axes
:param text: Show axes names (x,y,z)
:param font: Font
:type length: float, int
:type both: bool
:type text: bool
:type font: int
:return: OpenGL list
"""
if length > 0:
x = Point3(length, 0, 0)
y = Point3(0, length, 0)
z = Point3(0, 0, length)
o = Point3()
lista = _gl.glGenLists(1)
_gl.glNewList(lista, _gl.GL_COMPILE)
_gl.glBegin(_gl.GL_LINES)
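        # x axis in red, y in green, z in blue, all drawn from the origin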
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
if both:
x = Point3(-length, 0, 0)
y = Point3(0, -length, 0)
z = Point3(0, 0, -length)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
_gl.glEnd()
if text:
draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)
draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)
draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)
if both:
draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)
draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)
draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)
_gl.glEndList()
return lista
else:
raise Exception('Axes length must be positive, greater than zero')
def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,
linespace=20):
"""Dibuja un texto en una posicon dada por un punto point3"""
if color is None:
color = _UTILS_COLOR_WHITE
_gl.glColor3fv(color)
if isinstance(pos, Point3):
x = pos.get_x()
y = pos.get_y()
z = pos.get_z()
_gl.glRasterPos3f(x, y, z)
for char in text:
if char == '\n':
y += linespace
_gl.glRasterPos3f(x, y, z)
else:
try:
                    _glut.glutBitmapCharacter(font, ord(char))
                except:
                    if not _UTILS_ERRS[0]:
                        print_gl_error(
                            'Current OpenGL version does not support the glutBitmapCharacter function'
)
_UTILS_ERRS[0] = True
else:
raise Exception('Point must be Point3 type')
<mask token>
|
<mask token>
def create_axes(length, both=False, text=False, font=_glut.
GLUT_BITMAP_HELVETICA_18):
"""
Create axes system.
:param length: Axes length
:param both: Both axes
:param text: Show axes names (x,y,z)
:param font: Font
:type length: float, int
:type both: bool
:type text: bool
:type font: int
:return: OpenGL list
"""
if length > 0:
x = Point3(length, 0, 0)
y = Point3(0, length, 0)
z = Point3(0, 0, length)
o = Point3()
lista = _gl.glGenLists(1)
_gl.glNewList(lista, _gl.GL_COMPILE)
_gl.glBegin(_gl.GL_LINES)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
if both:
x = Point3(-length, 0, 0)
y = Point3(0, -length, 0)
z = Point3(0, 0, -length)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
_gl.glEnd()
if text:
draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)
draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)
draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)
if both:
draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)
draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)
draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)
_gl.glEndList()
return lista
else:
raise Exception('Axes length must be positive, greater than zero')
def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,
linespace=20):
"""Dibuja un texto en una posicon dada por un punto point3"""
if color is None:
color = _UTILS_COLOR_WHITE
_gl.glColor3fv(color)
if isinstance(pos, Point3):
x = pos.get_x()
y = pos.get_y()
z = pos.get_z()
_gl.glRasterPos3f(x, y, z)
for char in text:
if char == '\n':
y += linespace
_gl.glRasterPos3f(x, y, z)
else:
try:
                    _glut.glutBitmapCharacter(font, ord(char))
                except:
                    if not _UTILS_ERRS[0]:
                        print_gl_error(
                            'Current OpenGL version does not support the glutBitmapCharacter function'
)
_UTILS_ERRS[0] = True
else:
raise Exception('Point must be Point3 type')
def get_rgb_normalized(r, g, b, a=1.0):
"""
Return rgb color normalized (from 0 to 1).
:param r: Red color
:param g: Green color
:param b: Blue color
:param a: Alpha
:type r: float, int
:type g: float, int
:type b: float, int
:type a: float
:return: RGBA tuple
:rtype: tuple
"""
if r <= 1 and g <= 1 and b <= 1:
return r, g, b, a
return r / 255.0, g / 255.0, b / 255.0, a
|
<mask token>
_UTILS_COLOR_BLACK = [0, 0, 0]
_UTILS_COLOR_WHITE = [1, 1, 1]
_UTILS_ERRS = [False]
def print_gl_error(err_msg):
"""
Prints an OpenGL error to console.
:param err_msg: Error message
:type err_msg: basestring
"""
if len(err_msg) == 0:
return
print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)
def create_axes(length, both=False, text=False, font=_glut.
GLUT_BITMAP_HELVETICA_18):
"""
Create axes system.
:param length: Axes length
:param both: Both axes
:param text: Show axes names (x,y,z)
:param font: Font
:type length: float, int
:type both: bool
:type text: bool
:type font: int
:return: OpenGL list
"""
if length > 0:
x = Point3(length, 0, 0)
y = Point3(0, length, 0)
z = Point3(0, 0, length)
o = Point3()
lista = _gl.glGenLists(1)
_gl.glNewList(lista, _gl.GL_COMPILE)
_gl.glBegin(_gl.GL_LINES)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
if both:
x = Point3(-length, 0, 0)
y = Point3(0, -length, 0)
z = Point3(0, 0, -length)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
_gl.glEnd()
if text:
draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)
draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)
draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)
if both:
draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)
draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)
draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)
_gl.glEndList()
return lista
else:
raise Exception('Axes length must be positive, greater than zero')
def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,
linespace=20):
"""Dibuja un texto en una posicon dada por un punto point3"""
if color is None:
color = _UTILS_COLOR_WHITE
_gl.glColor3fv(color)
if isinstance(pos, Point3):
x = pos.get_x()
y = pos.get_y()
z = pos.get_z()
_gl.glRasterPos3f(x, y, z)
for char in text:
if char == '\n':
y += linespace
_gl.glRasterPos3f(x, y, z)
else:
try:
                    _glut.glutBitmapCharacter(font, ord(char))
                except:
                    if not _UTILS_ERRS[0]:
                        print_gl_error(
                            'Current OpenGL version does not support the glutBitmapCharacter function'
)
_UTILS_ERRS[0] = True
else:
raise Exception('Point must be Point3 type')
def get_rgb_normalized(r, g, b, a=1.0):
"""
Return rgb color normalized (from 0 to 1).
:param r: Red color
:param g: Green color
:param b: Blue color
:param a: Alpha
:type r: float, int
:type g: float, int
:type b: float, int
:type a: float
:return: RGBA tuple
:rtype: tuple
"""
if r <= 1 and g <= 1 and b <= 1:
return r, g, b, a
return r / 255.0, g / 255.0, b / 255.0, a
|
<mask token>
from __future__ import print_function
from PyOpenGLtoolbox.geometry import draw_vertex_list
from PyOpenGLtoolbox.mathlib import Point3
import sys as _sys
import OpenGL.GL as _gl
import OpenGL.GLUT as _glut
_UTILS_COLOR_BLACK = [0, 0, 0]
_UTILS_COLOR_WHITE = [1, 1, 1]
_UTILS_ERRS = [False]
def print_gl_error(err_msg):
"""
Prints an OpenGL error to console.
:param err_msg: Error message
:type err_msg: basestring
"""
if len(err_msg) == 0:
return
print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)
def create_axes(length, both=False, text=False, font=_glut.
GLUT_BITMAP_HELVETICA_18):
"""
Create axes system.
:param length: Axes length
:param both: Both axes
:param text: Show axes names (x,y,z)
:param font: Font
:type length: float, int
:type both: bool
:type text: bool
:type font: int
:return: OpenGL list
"""
if length > 0:
x = Point3(length, 0, 0)
y = Point3(0, length, 0)
z = Point3(0, 0, length)
o = Point3()
lista = _gl.glGenLists(1)
_gl.glNewList(lista, _gl.GL_COMPILE)
_gl.glBegin(_gl.GL_LINES)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
if both:
x = Point3(-length, 0, 0)
y = Point3(0, -length, 0)
z = Point3(0, 0, -length)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
_gl.glEnd()
if text:
draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)
draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)
draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)
if both:
draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)
draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)
draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)
_gl.glEndList()
return lista
else:
raise Exception('Axes length must be positive, greater than zero')
def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24,
linespace=20):
"""Dibuja un texto en una posicon dada por un punto point3"""
if color is None:
color = _UTILS_COLOR_WHITE
_gl.glColor3fv(color)
if isinstance(pos, Point3):
x = pos.get_x()
y = pos.get_y()
z = pos.get_z()
_gl.glRasterPos3f(x, y, z)
for char in text:
if char == '\n':
y += linespace
_gl.glRasterPos3f(x, y, z)
else:
try:
                    _glut.glutBitmapCharacter(font, ord(char))
                except:
                    if not _UTILS_ERRS[0]:
                        print_gl_error(
                            'Current OpenGL version does not support the glutBitmapCharacter function'
)
_UTILS_ERRS[0] = True
else:
raise Exception('Point must be Point3 type')
def get_rgb_normalized(r, g, b, a=1.0):
"""
Return rgb color normalized (from 0 to 1).
:param r: Red color
:param g: Green color
:param b: Blue color
:param a: Alpha
:type r: float, int
:type g: float, int
:type b: float, int
:type a: float
:return: RGBA tuple
:rtype: tuple
"""
if r <= 1 and g <= 1 and b <= 1:
return r, g, b, a
return r / 255.0, g / 255.0, b / 255.0, a
|
# coding=utf-8
"""
PYOPENGL-TOOLBOX UTILS
General purpouse functions.
MIT License
Copyright (c) 2015-2019 Pablo Pizarro R.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Library imports
from __future__ import print_function
from PyOpenGLtoolbox.geometry import draw_vertex_list
from PyOpenGLtoolbox.mathlib import Point3
import sys as _sys
# noinspection PyPep8Naming
import OpenGL.GL as _gl
# noinspection PyPep8Naming
import OpenGL.GLUT as _glut
# Constants
_UTILS_COLOR_BLACK = [0, 0, 0]
_UTILS_COLOR_WHITE = [1, 1, 1]
_UTILS_ERRS = [False]
def print_gl_error(err_msg):
"""
Prints an OpenGL error to console.
:param err_msg: Error message
:type err_msg: basestring
"""
if len(err_msg) == 0:
return
print('[GL-ERROR] {0}'.format(err_msg), file=_sys.stderr)
# noinspection PyUnresolvedReferences
def create_axes(length, both=False, text=False, font=_glut.GLUT_BITMAP_HELVETICA_18):
"""
Create axes system.
:param length: Axes length
:param both: Both axes
:param text: Show axes names (x,y,z)
:param font: Font
:type length: float, int
:type both: bool
:type text: bool
:type font: int
:return: OpenGL list
"""
if length > 0: # Valid length
# Crate points
x = Point3(length, 0, 0)
y = Point3(0, length, 0)
z = Point3(0, 0, length)
o = Point3()
# Create list
lista = _gl.glGenLists(1)
_gl.glNewList(lista, _gl.GL_COMPILE)
# Init primitve
_gl.glBegin(_gl.GL_LINES)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
if both: # Draw axes in both directions
x = Point3(-length, 0, 0)
y = Point3(0, -length, 0)
z = Point3(0, 0, -length)
_gl.glColor4fv([1, 0, 0, 1])
draw_vertex_list([o, x])
_gl.glColor4fv([0, 1, 0, 1])
draw_vertex_list([o, y])
_gl.glColor4fv([0, 0, 1, 1])
draw_vertex_list([o, z])
# End primitive
_gl.glEnd()
if text: # Draw axes names
draw_text('x', Point3(length + 60, 0, -15), [1, 0, 0], font)
draw_text('y', Point3(0, length + 50, -15), [0, 1, 0], font)
draw_text('z', Point3(+0, +0, length + 50), [0, 0, 1], font)
if both:
draw_text('-x', Point3(-length - 60, 0, -15), [1, 0, 0], font)
draw_text('-y', Point3(0, -length - 70, -15), [0, 1, 0], font)
draw_text('-z', Point3(+0, +0, -length - 80), [0, 0, 1], font)
# Returns list
_gl.glEndList()
return lista
else:
raise Exception('Axes length must be positive, greater than zero')
# noinspection PyUnresolvedReferences
def draw_text(text, pos, color=None, font=_glut.GLUT_BITMAP_TIMES_ROMAN_24, linespace=20):
"""Dibuja un texto en una posicon dada por un punto point3"""
if color is None:
color = _UTILS_COLOR_WHITE
_gl.glColor3fv(color)
if isinstance(pos, Point3):
x = pos.get_x()
y = pos.get_y()
z = pos.get_z()
_gl.glRasterPos3f(x, y, z)
for char in text:
if char == "\n":
y += linespace
_gl.glRasterPos3f(x, y, z)
else:
# noinspection PyBroadException
try:
                    _glut.glutBitmapCharacter(font, ord(char))
                except:
                    if not _UTILS_ERRS[0]:
                        print_gl_error('Current OpenGL version does not support the glutBitmapCharacter function')
_UTILS_ERRS[0] = True
else:
raise Exception('Point must be Point3 type')
def get_rgb_normalized(r, g, b, a=1.0):
"""
Return rgb color normalized (from 0 to 1).
:param r: Red color
:param g: Green color
:param b: Blue color
:param a: Alpha
:type r: float, int
:type g: float, int
:type b: float, int
:type a: float
:return: RGBA tuple
:rtype: tuple
"""
if r <= 1 and g <= 1 and b <= 1:
return r, g, b, a
return r / 255.0, g / 255.0, b / 255.0, a
|
[
2,
3,
5,
6,
7
] |
2,124 |
817d7259b3607f3a94d2f363c9684f733ee87d37
|
<mask token>
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
AuthorID = models.IntegerField(max_length=100)
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
|
<mask token>
class Author(models.Model):
<mask token>
<mask token>
<mask token>
<mask token>
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
AuthorID = models.IntegerField(max_length=100)
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
|
<mask token>
class Author(models.Model):
AuthorID = models.IntegerField(primary_key=True)
Name = models.CharField(max_length=200)
Age = models.IntegerField(max_length=50)
Country = models.CharField(max_length=100)
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
AuthorID = models.IntegerField(max_length=100)
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
|
from django.db import models
class Author(models.Model):
AuthorID = models.IntegerField(primary_key=True)
Name = models.CharField(max_length=200)
Age = models.IntegerField(max_length=50)
Country = models.CharField(max_length=100)
class Book(models.Model):
ISBN = models.CharField(primary_key=True, max_length=100)
Title = models.CharField(max_length=200)
AuthorID = models.IntegerField(max_length=100)
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
|
from django.db import models
# Create your models here.
class Author(models.Model):
AuthorID = models.IntegerField(primary_key=True)
Name = models.CharField(max_length=200)
Age = models.IntegerField(max_length=50)
Country = models.CharField(max_length=100)
class Book(models.Model):
ISBN = models.CharField(primary_key=True,max_length=100)
Title = models.CharField(max_length=200)
AuthorID = models.IntegerField(max_length=100)
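    # note: Django warns that max_length has no effect on IntegerField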
Publisher = models.CharField(max_length=200)
PublishDate = models.CharField(max_length=200)
Price = models.FloatField(max_length=200)
|
[
2,
3,
4,
5,
6
] |
2,125 |
2a8032c23e3c7aa3a7b0593c79db7adbc0353f93
|
<mask token>
class button:
def __init__(self, colour, x, y, width, height, text=''):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
def draw(self, win, outline=None):
if outline:
pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.
width + 4, self.height + 4), 0)
pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,
self.height), 0)
if self.text != '':
font = pygame.font.SysFont('calligrapher.ttf', 60)
text = font.render(self.text, 1, (0, 0, 0))
win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2
), self.y + (self.height / 2 - text.get_height() / 2)))
def isOver(self, pos):
if pos[0] > self.x and pos[0] < self.x + self.width:
if pos[1] > self.y and pos[1] < self.y + self.height:
return True
else:
return False
<mask token>
def mouseClick(screen):
x, y = pygame.mouse.get_pos()
if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):
pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)
return True, x, y
else:
print('Out of bounds!')
return False, x, y
def skeleExit(win):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
win.blit(aryadelight, (0, 0))
pygame.display.update()
xaxis = 100
for i in range(1, 42):
image = str(i) + '.png'
skele = pygame.image.load(os.path.join(image))
win.blit(skele, (250, 200))
text('Exiting...', win, xaxis + 20, 600)
pygame.display.update()
sleep(0.09)
<mask token>
def redrawMainWin(screen):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(aryadelight, (0, 0))
mapButton.draw(screen, (0, 0, 0))
instructionText(
'(Choose your cuisines, preferences and budget for the meal here!)',
screen, 215, 320)
predictButton.draw(screen, (0, 0, 0))
instructionText('(Find the nearest canteen!)', screen, 132, 470)
exitButton.draw(screen, (0, 0, 0))
ice = pygame.image.load(os.path.join('ice.png'))
screen.blit(ice, (500, 670))
font = pygame.font.SysFont('verdana', 20)
creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))
screen.blit(creator, (535, 670))
<mask token>
def redrawSearchWin(screen, x, y):
bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(bp, (0, 0))
GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))
screen.blit(GordonRamsay, (400, 100))
distList = []
for i in canteen_list:
distList.append(i)
print(distList)
top3 = nearest_can(distList, x, y)
print(top3)
text('Nearest Canteen:', screen, 110, 400)
yaxis = 490
canteenCount = 1
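    # show a photo of the closest canteen, then list the three nearest names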
for k in top3:
if canteenCount == 1:
if k == 'Food Court 1':
canteenPic = pygame.image.load(os.path.join('Canteen1.jpg'))
screen.blit(canteenPic, (150, 200))
if k == 'Food Court 2':
canteenPic = pygame.image.load(os.path.join('Canteen2.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Food Court 4':
canteenPic = pygame.image.load(os.path.join('Canteen4.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Food Court 9':
canteenPic = pygame.image.load(os.path.join('Canteen9.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Food Court 11':
canteenPic = pygame.image.load(os.path.join('Canteen11.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Food Court 13':
canteenPic = pygame.image.load(os.path.join('Canteen13.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Food Court 14':
canteenPic = pygame.image.load(os.path.join('Canteen14.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Food Court 16':
canteenPic = pygame.image.load(os.path.join('Canteen16.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Tamarind Food Court':
canteenPic = pygame.image.load(os.path.join('Tamarind.jpg'))
screen.blit(canteenPic, (150, 200))
if k == 'Pioneer Food Court':
canteenPic = pygame.image.load(os.path.join('Pioneer.png'))
screen.blit(canteenPic, (150, 200))
if k == 'North Spine Food Court':
canteenPic = pygame.image.load(os.path.join('NorthSpine.jpg'))
screen.blit(canteenPic, (150, 200))
if k == 'North Spine Plaza':
canteenPic = pygame.image.load(os.path.join(
'NorthSpinePlaza.jpg'))
screen.blit(canteenPic, (150, 200))
if k == 'South Spine Food Court':
canteenPic = pygame.image.load(os.path.join(
'SouthSpineKoufuFoodCourt.png'))
screen.blit(canteenPic, (150, 200))
if k == 'Quad Cafe':
canteenPic = pygame.image.load(os.path.join('Quad.jpg'))
screen.blit(canteenPic, (150, 200))
if k == 'Coffee Bean':
canteenPic = pygame.image.load(os.path.join('Coffee.jpg'))
screen.blit(canteenPic, (150, 200))
if k == 'North Hill Food Court':
canteenPic = pygame.image.load(os.path.join('NorthHill.jpg'))
screen.blit(canteenPic, (150, 200))
text(str(canteenCount), screen, 110, yaxis)
text('.', screen, 135, yaxis)
text(k, screen, 150, yaxis)
canteenCount += 1
yaxis += 70
return top3
<mask token>
def final_list(user_budget, user_cuisine, user_preference):
new_list = []
for i in canteen_list:
if user_budget >= canteen_list[i][1]:
new_list.append(i)
for c in user_cuisine:
for i in canteen_list:
if c in canteen_list[i][2]:
new_list.append(i)
for c in user_preference:
for i in canteen_list:
if c in canteen_list[i][4]:
new_list.append(i)
new_list = list(set(new_list))
if len(new_list) == 0:
for i in canteen_list:
new_list.append(i)
return new_list
def calc_dis(x1, y1, x2, y2):
    # ** binds tighter than /, so '** 1 / 2' halved the squared distance;
    # '** 0.5' takes the actual square root (Euclidean distance)
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def nearest_can(new_list, x, y):
top3 = []
copy_list = new_list.copy()
while len(top3) != 3:
j = copy_list[0]
coor = canteen_list[j][5]
Min = calc_dis(x, y, coor[0], coor[1])
food_court = ''
for k in copy_list:
coor = canteen_list[k][5]
dist = calc_dis(x, y, coor[0], coor[1])
if Min >= dist:
Min = dist
food_court = k
index = copy_list.index(food_court)
copy_list.pop(index)
top3.append(food_court)
print(top3)
return top3
<mask token>
|
<mask token>
class button:
def __init__(self, colour, x, y, width, height, text=''):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
def draw(self, win, outline=None):
if outline:
pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.
width + 4, self.height + 4), 0)
pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,
self.height), 0)
if self.text != '':
font = pygame.font.SysFont('calligrapher.ttf', 60)
text = font.render(self.text, 1, (0, 0, 0))
win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2
), self.y + (self.height / 2 - text.get_height() / 2)))
    def isOver(self, pos):
        # True only when pos, the mouse (x, y), lies inside the button;
        # the original fell through and returned None when only x was in range
        return (self.x < pos[0] < self.x + self.width and
                self.y < pos[1] < self.y + self.height)
<mask token>
def instructionText(text, win, x, y):
font = pygame.font.SysFont('Arial', 20)
phrase = font.render(text, 1, (0, 0, 0))
win.blit(phrase, (x, y))
<mask token>
def mouseClick(screen):
x, y = pygame.mouse.get_pos()
if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):
pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)
return True, x, y
else:
print('Out of bounds!')
return False, x, y
def skeleExit(win):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
win.blit(aryadelight, (0, 0))
pygame.display.update()
xaxis = 100
for i in range(1, 42):
image = str(i) + '.png'
skele = pygame.image.load(os.path.join(image))
win.blit(skele, (250, 200))
text('Exiting...', win, xaxis + 20, 600)
pygame.display.update()
sleep(0.09)
<mask token>
def redrawMainWin(screen):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(aryadelight, (0, 0))
mapButton.draw(screen, (0, 0, 0))
instructionText(
'(Choose your cuisines, preferences and budget for the meal here!)',
screen, 215, 320)
predictButton.draw(screen, (0, 0, 0))
instructionText('(Find the nearest canteen!)', screen, 132, 470)
exitButton.draw(screen, (0, 0, 0))
ice = pygame.image.load(os.path.join('ice.png'))
screen.blit(ice, (500, 670))
font = pygame.font.SysFont('verdana', 20)
creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))
screen.blit(creator, (535, 670))
<mask token>
def redrawSearchWin(screen, x, y):
bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(bp, (0, 0))
GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))
screen.blit(GordonRamsay, (400, 100))
    distList = list(canteen_list)
    print(distList)
    top3 = nearest_can(distList, x, y)
    print(top3)
    text('Nearest Canteen:', screen, 110, 400)
    canteen_pics = {'Food Court 1': 'Canteen1.jpg', 'Food Court 2':
        'Canteen2.png', 'Food Court 4': 'Canteen4.png', 'Food Court 9':
        'Canteen9.png', 'Food Court 11': 'Canteen11.png', 'Food Court 13':
        'Canteen13.png', 'Food Court 14': 'Canteen14.png', 'Food Court 16':
        'Canteen16.png', 'Tamarind Food Court': 'Tamarind.jpg',
        'Pioneer Food Court': 'Pioneer.png', 'North Spine Food Court':
        'NorthSpine.jpg', 'North Spine Plaza': 'NorthSpinePlaza.jpg',
        'South Spine Food Court': 'SouthSpineKoufuFoodCourt.png',
        'Quad Cafe': 'Quad.jpg', 'Coffee Bean': 'Coffee.jpg',
        'North Hill Food Court': 'NorthHill.jpg'}
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1 and k in canteen_pics:
            canteenPic = pygame.image.load(os.path.join(canteen_pics[k]))
            screen.blit(canteenPic, (150, 200))
        text(str(canteenCount), screen, 110, yaxis)
        text('.', screen, 135, yaxis)
        text(k, screen, 150, yaxis)
        canteenCount += 1
        yaxis += 70
return top3
<mask token>
def final_list(user_budget, user_cuisine, user_preference):
new_list = []
for i in canteen_list:
if user_budget >= canteen_list[i][1]:
new_list.append(i)
for c in user_cuisine:
for i in canteen_list:
if c in canteen_list[i][2]:
new_list.append(i)
for c in user_preference:
for i in canteen_list:
if c in canteen_list[i][4]:
new_list.append(i)
new_list = list(set(new_list))
if len(new_list) == 0:
for i in canteen_list:
new_list.append(i)
return new_list
def calc_dis(x1, y1, x2, y2):
    # ** binds tighter than /, so '** 1 / 2' halved the squared distance;
    # '** 0.5' takes the actual square root (Euclidean distance)
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def nearest_can(new_list, x, y):
top3 = []
copy_list = new_list.copy()
while len(top3) != 3:
j = copy_list[0]
coor = canteen_list[j][5]
Min = calc_dis(x, y, coor[0], coor[1])
food_court = ''
for k in copy_list:
coor = canteen_list[k][5]
dist = calc_dis(x, y, coor[0], coor[1])
if Min >= dist:
Min = dist
food_court = k
index = copy_list.index(food_court)
copy_list.pop(index)
top3.append(food_court)
print(top3)
return top3
<mask token>
|
<mask token>
class button:
def __init__(self, colour, x, y, width, height, text=''):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
def draw(self, win, outline=None):
if outline:
pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.
width + 4, self.height + 4), 0)
pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,
self.height), 0)
if self.text != '':
font = pygame.font.SysFont('calligrapher.ttf', 60)
text = font.render(self.text, 1, (0, 0, 0))
win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2
), self.y + (self.height / 2 - text.get_height() / 2)))
    def isOver(self, pos):
        # True only when pos, the mouse (x, y), lies inside the button;
        # the original fell through and returned None when only x was in range
        return (self.x < pos[0] < self.x + self.width and
                self.y < pos[1] < self.y + self.height)
<mask token>
def text(text, win, x, y):
font = pygame.font.SysFont('freesansbold.ttf', 50)
phrase = font.render(text, 1, (0, 0, 0))
win.blit(phrase, (x, y))
def instructionText(text, win, x, y):
font = pygame.font.SysFont('Arial', 20)
phrase = font.render(text, 1, (0, 0, 0))
win.blit(phrase, (x, y))
<mask token>
def mouseClick(screen):
x, y = pygame.mouse.get_pos()
if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):
pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)
return True, x, y
else:
print('Out of bounds!')
return False, x, y
def skeleExit(win):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
win.blit(aryadelight, (0, 0))
pygame.display.update()
xaxis = 100
for i in range(1, 42):
image = str(i) + '.png'
skele = pygame.image.load(os.path.join(image))
win.blit(skele, (250, 200))
text('Exiting...', win, xaxis + 20, 600)
pygame.display.update()
sleep(0.09)
<mask token>
def redrawMap(screen):
NTUmap = pygame.image.load(os.path.join('NTUMap.jpg'))
screen.blit(NTUmap, (0, 0))
for x in range(50, 900, 50):
pygame.draw.rect(screen, (255, 0, 0), (x, 0, 1, 700), 0)
for y in range(50, 700, 50):
pygame.draw.rect(screen, (255, 0, 0), (0, y, 900, 1), 0)
text('Please click on your current location!', screen, 200, 100)
<mask token>
def redrawMainWin(screen):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(aryadelight, (0, 0))
mapButton.draw(screen, (0, 0, 0))
instructionText(
'(Choose your cuisines, preferences and budget for the meal here!)',
screen, 215, 320)
predictButton.draw(screen, (0, 0, 0))
instructionText('(Find the nearest canteen!)', screen, 132, 470)
exitButton.draw(screen, (0, 0, 0))
ice = pygame.image.load(os.path.join('ice.png'))
screen.blit(ice, (500, 670))
font = pygame.font.SysFont('verdana', 20)
creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))
screen.blit(creator, (535, 670))
<mask token>
def redrawSearchWin(screen, x, y):
bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(bp, (0, 0))
GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))
screen.blit(GordonRamsay, (400, 100))
    distList = list(canteen_list)
    print(distList)
    top3 = nearest_can(distList, x, y)
    print(top3)
    text('Nearest Canteen:', screen, 110, 400)
    canteen_pics = {'Food Court 1': 'Canteen1.jpg', 'Food Court 2':
        'Canteen2.png', 'Food Court 4': 'Canteen4.png', 'Food Court 9':
        'Canteen9.png', 'Food Court 11': 'Canteen11.png', 'Food Court 13':
        'Canteen13.png', 'Food Court 14': 'Canteen14.png', 'Food Court 16':
        'Canteen16.png', 'Tamarind Food Court': 'Tamarind.jpg',
        'Pioneer Food Court': 'Pioneer.png', 'North Spine Food Court':
        'NorthSpine.jpg', 'North Spine Plaza': 'NorthSpinePlaza.jpg',
        'South Spine Food Court': 'SouthSpineKoufuFoodCourt.png',
        'Quad Cafe': 'Quad.jpg', 'Coffee Bean': 'Coffee.jpg',
        'North Hill Food Court': 'NorthHill.jpg'}
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1 and k in canteen_pics:
            canteenPic = pygame.image.load(os.path.join(canteen_pics[k]))
            screen.blit(canteenPic, (150, 200))
        text(str(canteenCount), screen, 110, yaxis)
        text('.', screen, 135, yaxis)
        text(k, screen, 150, yaxis)
        canteenCount += 1
        yaxis += 70
return top3
def complicatedSearchWin(screen, top3):
bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(bp, (0, 0))
GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))
screen.blit(GordonRamsay, (400, 100))
text('Nearest Canteen:', screen, 110, 400)
    canteen_pics = {'Food Court 1': 'Canteen1.jpg', 'Food Court 2':
        'Canteen2.png', 'Food Court 4': 'Canteen4.png', 'Food Court 9':
        'Canteen9.png', 'Food Court 11': 'Canteen11.png', 'Food Court 13':
        'Canteen13.png', 'Food Court 14': 'Canteen14.png', 'Food Court 16':
        'Canteen16.png', 'Tamarind Food Court': 'Tamarind.jpg',
        'Pioneer Food Court': 'Pioneer.png', 'North Spine Food Court':
        'NorthSpine.jpg', 'North Spine Plaza': 'NorthSpinePlaza.jpg',
        'South Spine Food Court': 'SouthSpineKoufuFoodCourt.png',
        'Quad Cafe': 'Quad.jpg', 'Coffee Bean': 'Coffee.jpg',
        'North Hill Food Court': 'NorthHill.jpg'}
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1 and k in canteen_pics:
            canteenPic = pygame.image.load(os.path.join(canteen_pics[k]))
            screen.blit(canteenPic, (150, 200))
        text(str(canteenCount), screen, 110, yaxis)
        text('.', screen, 135, yaxis)
        text(k, screen, 150, yaxis)
        canteenCount += 1
        yaxis += 70
<mask token>
def final_list(user_budget, user_cuisine, user_preference):
new_list = []
for i in canteen_list:
if user_budget >= canteen_list[i][1]:
new_list.append(i)
for c in user_cuisine:
for i in canteen_list:
if c in canteen_list[i][2]:
new_list.append(i)
for c in user_preference:
for i in canteen_list:
if c in canteen_list[i][4]:
new_list.append(i)
new_list = list(set(new_list))
if len(new_list) == 0:
for i in canteen_list:
new_list.append(i)
return new_list
def calc_dis(x1, y1, x2, y2):
    # ** binds tighter than /, so '** 1 / 2' halved the squared distance;
    # '** 0.5' takes the actual square root (Euclidean distance)
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def nearest_can(new_list, x, y):
top3 = []
copy_list = new_list.copy()
while len(top3) != 3:
j = copy_list[0]
coor = canteen_list[j][5]
Min = calc_dis(x, y, coor[0], coor[1])
food_court = ''
for k in copy_list:
coor = canteen_list[k][5]
dist = calc_dis(x, y, coor[0], coor[1])
if Min >= dist:
Min = dist
food_court = k
index = copy_list.index(food_court)
copy_list.pop(index)
top3.append(food_court)
print(top3)
return top3
<mask token>
|
import pygame
import os
from time import sleep
screen = pygame.display.set_mode((900, 700))
screen.fill((255, 255, 255))
pygame.display.set_caption('NTUFOODIERECOMMENDSYSTEM')
<mask token>
canteen_list = {'Food Court 1': [12, 3.5, ['Korean', 'Japanese', 'Western'],
2100, ['Halal', 'Non-Halal/Non-Vegetarian'], (442, 473)],
'Food Court 2': [10, 3.6, ['Korean', 'Chinese', 'Malay'], 2100, [
'Halal', 'Vegetarian', 'Non-Halal/Non-Vegetarian'], (477, 409)],
'Food Court 4': [10, 3, ['Chinese', 'Western'], 2100, [
'Non-Halal/Non-Vegetarian'], (358, 526)], 'Food Court 9': [10, 3.5, [
'Chinese'], 2100, ['Halal', 'Vegetarian', 'Non-Halal/Non-Vegetarian'],
(582, 288)], 'Food Court 11': [10, 2.5, ['Chinese', 'Indian',
'Japanese', 'Western'], 2100, ['Halal', 'Vegetarian',
'Non-Halal/Non-Vegetarian'], (682, 243)], 'Food Court 13': [9, 2, [
'Western', 'Korean', 'Japanese', 'Chinese'], 2100, ['Halal',
'Vegetarian', 'Non-Halal/Non-Vegetarian'], (445, 176)], 'Food Court 14':
[8, 3, ['Western', 'Chinese', 'Korean', 'Malay'], 2100, ['Halal',
'Vegetarian', 'Non-Halal/Non-Vegetarian'], (509, 182)], 'Food Court 16':
[10, 3.3, ['Japanese', 'Chinese', 'Korean', 'Indian'], 2100, ['Halal',
'Vegetarian', 'Non-Halal/Non-Vegetarian'], (405, 221)],
'Tamarind Food Court': [10, 3, ['Malay', 'Chinese', 'Korean', 'Western'
], 2100, ['Halal', 'Non-Halal', 'Vegetarian',
'Non-Halal/Non-Vegetarian'], (627, 200)], 'Pioneer Food Court': [20,
2.3, ['Thai', 'Chinese'], 0, ['Vegetarian', 'Non-Halal/Non-Vegetarian'],
(497, 561)], 'North Spine Food Court': [10, 2.5, ['Korean', 'Japanese',
'Chinese', 'Western', 'Malay'], 2100, ['Vegetarian',
'Non-Halal/Non-Vegetarian'], (275, 293)], 'North Spine Plaza': [10, 4,
['Western', 'Korean'], 2130, ['Vegetarian', 'Halal',
'Non-Halal/Non-Vegetarian'], (287, 339)], 'South Spine Food Court': [10,
2, ['Chinese', 'Malay', 'Korean', 'Japanese', 'Western'], 2100, [
'Vegetarian', 'Halal', 'Non-Halal/Non-Vegetarian'], (227, 496)],
'Quad Cafe': [10, 2.4, ['Korean', 'Chinese', 'Indian', 'Malay'], 2100,
['Vegetarian', 'Halal', 'Non-Halal/Non-Vegetarian'], (224, 351)],
'Coffee Bean': [20, 4, ['Western'], 2000, ['Vegetarian', 'Halal',
'Non-Halal/Non-Vegetarian'], (219, 389)], 'North Hill Food Court': [10,
3.8, ['Chinese', 'Malay', 'Indian'], 2100, ['Vegetarian', 'Halal',
'Non-Halal/Non-Vegetarian'], (720, 314)]}
<mask token>
class button:
def __init__(self, colour, x, y, width, height, text=''):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
def draw(self, win, outline=None):
if outline:
pygame.draw.rect(win, outline, (self.x - 2, self.y - 2, self.
width + 4, self.height + 4), 0)
pygame.draw.rect(win, self.colour, (self.x, self.y, self.width,
self.height), 0)
if self.text != '':
font = pygame.font.SysFont('calligrapher.ttf', 60)
text = font.render(self.text, 1, (0, 0, 0))
win.blit(text, (self.x + (self.width / 2 - text.get_width() / 2
), self.y + (self.height / 2 - text.get_height() / 2)))
    def isOver(self, pos):
        # True only when pos, the mouse (x, y), lies inside the button;
        # the original fell through and returned None when only x was in range
        return (self.x < pos[0] < self.x + self.width and
                self.y < pos[1] < self.y + self.height)
<mask token>
def text(text, win, x, y):
font = pygame.font.SysFont('freesansbold.ttf', 50)
phrase = font.render(text, 1, (0, 0, 0))
win.blit(phrase, (x, y))
def instructionText(text, win, x, y):
font = pygame.font.SysFont('Arial', 20)
phrase = font.render(text, 1, (0, 0, 0))
win.blit(phrase, (x, y))
def header(text, win, x, y):
font = pygame.font.SysFont('TimesNewRoman', 70)
phrase = font.render(text, 1, (0, 0, 0))
win.blit(phrase, (x, y))
def mouseClick(screen):
x, y = pygame.mouse.get_pos()
if (x >= 65 and x <= 727) and (y >= 82 and y <= 618):
pygame.draw.circle(screen, (255, 0, 150), (x, y), 15)
return True, x, y
else:
print('Out of bounds!')
return False, x, y
def skeleExit(win):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
win.blit(aryadelight, (0, 0))
pygame.display.update()
xaxis = 100
for i in range(1, 42):
image = str(i) + '.png'
skele = pygame.image.load(os.path.join(image))
win.blit(skele, (250, 200))
text('Exiting...', win, xaxis + 20, 600)
pygame.display.update()
sleep(0.09)
def loading(win):
    # cycle through the four loading frames three times, 0.3 s per frame
    for _ in range(3):
        for name in ('load0.png', 'load1.png', 'load2.png', 'load3.png'):
            frame = pygame.image.load(os.path.join(name))
            win.blit(frame, (0, 0))
            pygame.display.update()
            sleep(0.3)
def redrawMap(screen):
NTUmap = pygame.image.load(os.path.join('NTUMap.jpg'))
screen.blit(NTUmap, (0, 0))
for x in range(50, 900, 50):
pygame.draw.rect(screen, (255, 0, 0), (x, 0, 1, 700), 0)
for y in range(50, 700, 50):
pygame.draw.rect(screen, (255, 0, 0), (0, y, 900, 1), 0)
text('Please click on your current location!', screen, 200, 100)
def redrawGPSMap(screen, top3, x, y):
NTUmap = pygame.image.load(os.path.join('NTUMap.jpg'))
screen.blit(NTUmap, (0, 0))
redGPS = pygame.image.load(os.path.join('redgps.png'))
screen.blit(redGPS, (x - 16, y - 32))
instructionText('You are currently at this position.', screen, x + 4, y -
10)
    # 1st, 2nd and 3rd nearest canteens get a blue, black and yellow pin
    gps_icons = {1: 'bluegps.png', 2: 'blackgps.png', 3: 'yellowgps.png'}
    counter = 1
    for i in top3:
        coor = canteen_list[i][5]
        icon_name = gps_icons.get(counter)
        if icon_name:
            icon = pygame.image.load(os.path.join(icon_name))
            screen.blit(icon, (coor[0] - 12, coor[1] - 24))
            instructionText(i, screen, coor[0] - 24, coor[1])
        counter += 1
restartButton.draw(screen, (0, 0, 0))
def redrawMainWin(screen):
aryadelight = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(aryadelight, (0, 0))
mapButton.draw(screen, (0, 0, 0))
instructionText(
'(Choose your cuisines, preferences and budget for the meal here!)',
screen, 215, 320)
predictButton.draw(screen, (0, 0, 0))
instructionText('(Find the nearest canteen!)', screen, 132, 470)
exitButton.draw(screen, (0, 0, 0))
ice = pygame.image.load(os.path.join('ice.png'))
screen.blit(ice, (500, 670))
font = pygame.font.SysFont('verdana', 20)
creator = font.render('Made by HweeHean X Arya', 1, (0, 0, 200))
screen.blit(creator, (535, 670))
def redrawCustWin(screen):
bp = pygame.image.load(os.path.join('gradient.jpg'))
screen.blit(bp, (0, 0))
instructionText('Left click again to reset!', screen, 300, 20)
text('Please select your food preference: ', screen, 100, 50)
halalButton.draw(screen, (0, 0, 0))
vegButton.draw(screen, (0, 0, 0))
nonhalalButton.draw(screen, (0, 0, 0))
text('Please select your cuisine type: ', screen, 100, 200)
koreanButton.draw(screen, (0, 0, 0))
malayButton.draw(screen, (0, 0, 0))
japanButton.draw(screen, (0, 0, 0))
chineseButton.draw(screen, (0, 0, 0))
indianButton.draw(screen, (0, 0, 0))
westernButton.draw(screen, (0, 0, 0))
text('Please select your maximum budget: ', screen, 100, 430)
button3.draw(screen, (0, 0, 0))
button5.draw(screen, (0, 0, 0))
button7.draw(screen, (0, 0, 0))
button9.draw(screen, (0, 0, 0))
nextButton.draw(screen, (0, 0, 0))
def redrawSearchWin(screen, x, y):
bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(bp, (0, 0))
GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))
screen.blit(GordonRamsay, (400, 100))
    distList = list(canteen_list)
    print(distList)
    top3 = nearest_can(distList, x, y)
    print(top3)
    text('Nearest Canteen:', screen, 110, 400)
    canteen_pics = {'Food Court 1': 'Canteen1.jpg', 'Food Court 2':
        'Canteen2.png', 'Food Court 4': 'Canteen4.png', 'Food Court 9':
        'Canteen9.png', 'Food Court 11': 'Canteen11.png', 'Food Court 13':
        'Canteen13.png', 'Food Court 14': 'Canteen14.png', 'Food Court 16':
        'Canteen16.png', 'Tamarind Food Court': 'Tamarind.jpg',
        'Pioneer Food Court': 'Pioneer.png', 'North Spine Food Court':
        'NorthSpine.jpg', 'North Spine Plaza': 'NorthSpinePlaza.jpg',
        'South Spine Food Court': 'SouthSpineKoufuFoodCourt.png',
        'Quad Cafe': 'Quad.jpg', 'Coffee Bean': 'Coffee.jpg',
        'North Hill Food Court': 'NorthHill.jpg'}
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1 and k in canteen_pics:
            canteenPic = pygame.image.load(os.path.join(canteen_pics[k]))
            screen.blit(canteenPic, (150, 200))
        text(str(canteenCount), screen, 110, yaxis)
        text('.', screen, 135, yaxis)
        text(k, screen, 150, yaxis)
        canteenCount += 1
        yaxis += 70
return top3
def complicatedSearchWin(screen, top3):
bp = pygame.image.load(os.path.join('NTUFoodieRecsv1.png'))
screen.blit(bp, (0, 0))
GordonRamsay = pygame.image.load(os.path.join('GordonRamsay.png'))
screen.blit(GordonRamsay, (400, 100))
text('Nearest Canteen:', screen, 110, 400)
    canteen_pics = {'Food Court 1': 'Canteen1.jpg', 'Food Court 2':
        'Canteen2.png', 'Food Court 4': 'Canteen4.png', 'Food Court 9':
        'Canteen9.png', 'Food Court 11': 'Canteen11.png', 'Food Court 13':
        'Canteen13.png', 'Food Court 14': 'Canteen14.png', 'Food Court 16':
        'Canteen16.png', 'Tamarind Food Court': 'Tamarind.jpg',
        'Pioneer Food Court': 'Pioneer.png', 'North Spine Food Court':
        'NorthSpine.jpg', 'North Spine Plaza': 'NorthSpinePlaza.jpg',
        'South Spine Food Court': 'SouthSpineKoufuFoodCourt.png',
        'Quad Cafe': 'Quad.jpg', 'Coffee Bean': 'Coffee.jpg',
        'North Hill Food Court': 'NorthHill.jpg'}
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1 and k in canteen_pics:
            canteenPic = pygame.image.load(os.path.join(canteen_pics[k]))
            screen.blit(canteenPic, (150, 200))
        text(str(canteenCount), screen, 110, yaxis)
        text('.', screen, 135, yaxis)
        text(k, screen, 150, yaxis)
        canteenCount += 1
        yaxis += 70
<mask token>
def final_list(user_budget, user_cuisine, user_preference):
new_list = []
for i in canteen_list:
if user_budget >= canteen_list[i][1]:
new_list.append(i)
for c in user_cuisine:
for i in canteen_list:
if c in canteen_list[i][2]:
new_list.append(i)
for c in user_preference:
for i in canteen_list:
if c in canteen_list[i][4]:
new_list.append(i)
new_list = list(set(new_list))
if len(new_list) == 0:
for i in canteen_list:
new_list.append(i)
return new_list
def calc_dis(x1, y1, x2, y2):
    # ** binds tighter than /, so '** 1 / 2' halved the squared distance;
    # '** 0.5' takes the actual square root (Euclidean distance)
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
def nearest_can(new_list, x, y):
top3 = []
copy_list = new_list.copy()
while len(top3) != 3:
j = copy_list[0]
coor = canteen_list[j][5]
Min = calc_dis(x, y, coor[0], coor[1])
food_court = ''
for k in copy_list:
coor = canteen_list[k][5]
dist = calc_dis(x, y, coor[0], coor[1])
if Min >= dist:
Min = dist
food_court = k
index = copy_list.index(food_court)
copy_list.pop(index)
top3.append(food_court)
print(top3)
return top3
<mask token>
mapButton = button((255, 255, 255), 200, 250, 500, 100, 'Canteen Customisation'
)
predictButton = button((255, 255, 255), 100, 400, 300, 100, 'Prediction')
exitButton = button((255, 255, 255), 500, 400, 300, 100, 'Exit')
halalButton = button((255, 255, 255), 50, 120, 250, 50, 'Halal')
vegButton = button((255, 255, 255), 320, 120, 250, 50, 'Vegetarian')
nonhalalButton = button((255, 255, 255), 590, 120, 250, 50, 'Non-Halal')
koreanButton = button((255, 255, 255), 50, 270, 250, 50, 'Korean')
malayButton = button((255, 255, 255), 320, 270, 250, 50, 'Malay')
japanButton = button((255, 255, 255), 590, 270, 250, 50, 'Japanese')
chineseButton = button((255, 255, 255), 50, 340, 250, 50, 'Chinese')
indianButton = button((255, 255, 255), 320, 340, 250, 50, 'Indian')
westernButton = button((255, 255, 255), 590, 340, 250, 50, 'Western')
button3 = button((255, 255, 255), 235, 490, 70, 50, '$3')
button5 = button((255, 255, 255), 355, 490, 70, 50, '$5')
button7 = button((255, 255, 255), 475, 490, 70, 50, '$7')
button9 = button((255, 255, 255), 595, 490, 70, 50, '$10')
nextButton = button((255, 255, 255), 730, 580, 120, 70, 'Next')
gpsButton = button((255, 255, 255), 700, 600, 170, 50, 'to Map')
restartButton = button((255, 255, 255), 700, 600, 190, 50, 'Restart?')
<mask token>
halalButtonPressed = False
vegButtonPressed = False
nonhalalButtonPressed = False
koreanButtonPressed = False
malayButtonPressed = False
japanButtonPressed = False
chineseButtonPressed = False
indianButtonPressed = False
westernButtonPressed = False
button3Pressed = False
button5Pressed = False
button7Pressed = False
button9Pressed = False
nextButtonPressed = False
gpsButtonPressed = False
checkButton = True
mapCoor = False
customisationMenu = False
mapCoor2 = False
easySearch = False
complicatedMenu = False
oneTime = True
<mask token>
pygame.init()
run = True
clock = pygame.time.Clock()
while run:
if checkButton:
redrawMainWin(screen)
if customisationMenu:
redrawCustWin(screen)
if easySearch:
if oneTime:
nearest_canteen = redrawSearchWin(screen, x, y)
sleep(2)
oneTime = False
gpsButton.draw(screen, (0, 0, 0))
if complicatedMenu:
if oneTime:
complicatedSearchWin(screen, nearest_canteen)
sleep(2)
oneTime = False
gpsButton.draw(screen, (0, 0, 0))
if gpsButtonPressed == True:
redrawGPSMap(screen, nearest_canteen, x, y)
pygame.display.update()
clock.tick(30)
for event in pygame.event.get():
pos = pygame.mouse.get_pos()
if event.type == pygame.QUIT:
run = False
pygame.quit()
if gpsButtonPressed:
if event.type == pygame.MOUSEBUTTONDOWN:
if restartButton.isOver(pos):
restartButton.colour = 50, 50, 50
restartButton.draw(screen, (0, 0, 0))
pygame.display.update()
print('clicked the restart button')
halalButtonPressed = False
vegButtonPressed = False
nonhalalButtonPressed = False
koreanButtonPressed = False
malayButtonPressed = False
japanButtonPressed = False
chineseButtonPressed = False
indianButtonPressed = False
westernButtonPressed = False
button3Pressed = False
button5Pressed = False
button7Pressed = False
button9Pressed = False
nextButtonPressed = False
gpsButtonPressed = False
checkButton = True
mapCoor = False
customisationMenu = False
mapCoor2 = False
easySearch = False
complicatedMenu = False
oneTime = True
if event.type == pygame.MOUSEMOTION:
if restartButton.isOver(pos):
restartButton.colour = 0, 255, 0
continue
else:
restartButton.colour = 255, 255, 255
continue
if easySearch == True or complicatedMenu == True:
if event.type == pygame.MOUSEBUTTONDOWN:
if gpsButton.isOver(pos):
gpsButton.colour = 50, 50, 50
gpsButton.draw(screen, (0, 0, 0))
pygame.display.update()
print('clicked gps button')
gpsButtonPressed = True
easySearch = False
complicatedMenu = False
continue
if event.type == pygame.MOUSEMOTION:
if gpsButton.isOver(pos):
gpsButton.colour = 0, 255, 0
continue
else:
gpsButton.colour = 255, 255, 255
continue
if checkButton:
if event.type == pygame.MOUSEBUTTONDOWN:
if mapButton.isOver(pos):
mapButton.colour = 0, 255, 0
redrawMainWin(screen)
pygame.display.update()
print('clicked map button')
sleep(0.5)
redrawMap(screen)
checkButton = False
mapCoor = True
continue
if predictButton.isOver(pos):
predictButton.colour = 0, 255, 0
redrawMainWin(screen)
pygame.display.update()
print('clicked predict button')
sleep(0.5)
redrawMap(screen)
checkButton = False
mapCoor2 = True
continue
if exitButton.isOver(pos):
exitButton.colour = 0, 255, 0
print('Exiting...')
skeleExit(screen)
pygame.quit()
run = False
exit()
if event.type == pygame.MOUSEMOTION:
if mapButton.isOver(pos):
mapButton.colour = 255, 0, 0
else:
mapButton.colour = 255, 255, 255
if predictButton.isOver(pos):
predictButton.colour = 255, 0, 0
else:
predictButton.colour = 255, 255, 255
if exitButton.isOver(pos):
exitButton.colour = 255, 0, 0
else:
exitButton.colour = 255, 255, 255
if customisationMenu:
if event.type == pygame.MOUSEMOTION:
if nextButton.isOver(pos):
nextButton.colour = 0, 0, 255
else:
nextButton.colour = 255, 255, 255
continue
if event.type == pygame.MOUSEBUTTONDOWN:
if nextButton.isOver(pos):
nextButton.colour = 255, 255, 0
nextButtonPressed = True
customisationMenu = False
continue
if halalButton.isOver(pos):
if halalButtonPressed == False:
if nonhalalButtonPressed:
nonhalalButton.colour = 255, 255, 255
nonhalalButtonPressed = False
halalButton.colour = 0, 255, 0
print('clicked Halal button')
halalButtonPressed = True
continue
else:
halalButton.colour = 255, 255, 255
halalButtonPressed = False
continue
if vegButton.isOver(pos):
if vegButtonPressed == False:
if nonhalalButtonPressed:
nonhalalButton.colour = 255, 255, 255
nonhalalButtonPressed = False
vegButton.colour = 0, 255, 0
print('clicked Vegetarian button')
vegButtonPressed = True
continue
else:
vegButton.colour = 255, 255, 255
vegButtonPressed = False
continue
if nonhalalButton.isOver(pos):
if nonhalalButtonPressed == False:
if halalButtonPressed:
halalButton.colour = 255, 255, 255
halalButtonPressed = False
if vegButtonPressed:
vegButton.colour = 255, 255, 255
vegButtonPressed = False
nonhalalButton.colour = 0, 255, 0
print('clicked non-halal button')
nonhalalButtonPressed = True
continue
else:
nonhalalButton.colour = 255, 255, 255
nonhalalButtonPressed = False
if koreanButton.isOver(pos):
if koreanButtonPressed == False:
koreanButton.colour = 0, 255, 0
print('clicked korean button')
koreanButtonPressed = True
continue
else:
koreanButton.colour = 255, 255, 255
koreanButtonPressed = False
if malayButton.isOver(pos):
if malayButtonPressed == False:
malayButton.colour = 0, 255, 0
print('clicked Malay button')
malayButtonPressed = True
continue
else:
malayButton.colour = 255, 255, 255
malayButtonPressed = False
if japanButton.isOver(pos):
if japanButtonPressed == False:
japanButton.colour = 0, 255, 0
print('clicked japan button')
japanButtonPressed = True
continue
else:
japanButton.colour = 255, 255, 255
japanButtonPressed = False
if chineseButton.isOver(pos):
if chineseButtonPressed == False:
chineseButton.colour = 0, 255, 0
print('clicked chinese button')
chineseButtonPressed = True
continue
else:
chineseButton.colour = 255, 255, 255
chineseButtonPressed = False
if indianButton.isOver(pos):
if indianButtonPressed == False:
indianButton.colour = 0, 255, 0
print('clicked indian button')
indianButtonPressed = True
continue
else:
indianButton.colour = 255, 255, 255
indianButtonPressed = False
if westernButton.isOver(pos):
if westernButtonPressed == False:
westernButton.colour = 0, 255, 0
print('clicked western button')
westernButtonPressed = True
continue
else:
westernButton.colour = 255, 255, 255
westernButtonPressed = False
if button3.isOver(pos):
if button3Pressed == False:
if button5Pressed == True:
button5.colour = 255, 255, 255
button5Pressed = False
if button7Pressed == True:
button7.colour = 255, 255, 255
button7Pressed = False
if button9Pressed == True:
button9.colour = 255, 255, 255
button9Pressed = False
button3.colour = 0, 255, 0
print('clicked $3')
button3Pressed = True
continue
else:
button3.colour = 255, 255, 255
button3Pressed = False
if button5.isOver(pos):
if button5Pressed == False:
if button3Pressed == True:
button3.colour = 255, 255, 255
button3Pressed = False
if button7Pressed == True:
button7.colour = 255, 255, 255
button7Pressed = False
if button9Pressed == True:
button9.colour = 255, 255, 255
button9Pressed = False
button5.colour = 0, 255, 0
print('Clicked $5')
button5Pressed = True
continue
else:
button5.colour = 255, 255, 255
button5Pressed = False
if button7.isOver(pos):
if button7Pressed == False:
if button3Pressed == True:
button3.colour = 255, 255, 255
button3Pressed = False
if button5Pressed == True:
button5.colour = 255, 255, 255
button5Pressed = False
if button9Pressed == True:
button9.colour = 255, 255, 255
button9Pressed = False
button7.colour = 0, 255, 0
print('Clicked $7')
button7Pressed = True
continue
else:
button7.colour = 255, 255, 255
button7Pressed = False
if button9.isOver(pos):
if button9Pressed == False:
if button3Pressed == True:
button3.colour = 255, 255, 255
button3Pressed = False
if button5Pressed == True:
button5.colour = 255, 255, 255
button5Pressed = False
if button7Pressed == True:
button7.colour = 255, 255, 255
button7Pressed = False
button9.colour = 0, 255, 0
print('Clicked $10')
button9Pressed = True
continue
else:
button9.colour = 255, 255, 255
button9Pressed = False
if mapCoor == True and event.type == pygame.MOUSEBUTTONDOWN:
mouseclick = mouseClick(screen)
if mouseclick[0]:
pygame.display.update()
x = mouseclick[1]
y = mouseclick[2]
print(x, ',', y)
mapCoor = False
sleep(1)
customisationMenu = True
if mapCoor2 == True and event.type == pygame.MOUSEBUTTONDOWN:
mouseclick = mouseClick(screen)
if mouseclick[0]:
pygame.display.update()
x = mouseclick[1]
y = mouseclick[2]
print(x, ',', y)
mapCoor2 = False
sleep(1)
loading(screen)
easySearch = True
if nextButtonPressed:
sleep(1)
loading(screen)
user_prefList = []
user_cuisineList = []
user_budget = 0
if halalButtonPressed:
user_prefList.append('Halal')
if vegButtonPressed:
user_prefList.append('Vegetarian')
if nonhalalButtonPressed:
user_prefList.append('Non-Halal/Non-Vegetarian')
if koreanButtonPressed:
user_cuisineList.append('Korean')
if malayButtonPressed:
user_cuisineList.append('Malay')
if japanButtonPressed:
user_cuisineList.append('Japanese')
if chineseButtonPressed:
user_cuisineList.append('Chinese')
if indianButtonPressed:
user_cuisineList.append('Indian')
if westernButtonPressed:
user_cuisineList.append('Western')
if button3Pressed:
user_budget = 3
if button5Pressed:
user_budget = 5
if button7Pressed:
user_budget = 7
if button9Pressed:
user_budget = 9
print(user_cuisineList)
print(user_prefList)
print(user_budget)
finalID = final_list(user_budget, user_cuisineList, user_prefList)
print(finalID)
nearest_canteen = nearest_can(finalID, x, y)
print(nearest_canteen)
sleep(1)
nextButtonPressed = False
complicatedMenu = True
|
import pygame
import os
from time import sleep
screen = pygame.display.set_mode((900,700))
screen.fill((255,255,255))
pygame.display.set_caption("NTUFOODIERECOMMENDSYSTEM")
'''
###########################
──╔╗────╔╗
──║║───╔╝╚╗
╔═╝╠╦══╬╗╔╬╦══╦═╗╔══╦═╦╗─╔╗
║╔╗╠╣╔═╝║║╠╣╔╗║╔╗╣╔╗║╔╣║─║║
║╚╝║║╚═╗║╚╣║╚╝║║║║╔╗║║║╚═╝║
╚══╩╩══╝╚═╩╩══╩╝╚╩╝╚╩╝╚═╗╔╝
──────────────────────╔═╝║
──────────────────────╚══╝
###########################
● Database is stored on site.
● Updating it is relatively simple.
● The programme runs on pygame, which takes no text input, so the database cannot be edited from within the UI.
● However, it can easily be updated from the shell/console.
'''
# Each food court entry is stored as [Highest Cost, Lowest Cost, Cuisines Available, Closing Time, Food Preferences Available, Coordinates on NTU Map].
# The dictionary maps each canteen name (key) to the list of properties (value) allotted to it.
canteen_list = {
"Food Court 1": [12, 3.5, ["Korean", "Japanese", "Western"], 2100, ["Halal", "Non-Halal/Non-Vegetarian"], (442, 473)],
"Food Court 2": [10, 3.6, ["Korean", "Chinese", "Malay", ], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (477, 409)],
"Food Court 4": [10, 3, ["Chinese", "Western"], 2100, ["Non-Halal/Non-Vegetarian"], (358,526)],
"Food Court 9": [10, 3.5, ["Chinese"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (582, 288)],
"Food Court 11": [10, 2.5, ["Chinese", "Indian", "Japanese", "Western"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (682, 243)],
"Food Court 13": [9, 2, ["Western", "Korean", "Japanese", "Chinese"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (445, 176)],
"Food Court 14": [8, 3, ["Western", "Chinese", "Korean", "Malay"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (509, 182)],
"Food Court 16": [10, 3.3, ["Japanese", "Chinese", "Korean", "Indian"], 2100, ["Halal", "Vegetarian", "Non-Halal/Non-Vegetarian"], (405, 221)],
"Tamarind Food Court": [10, 3, ["Malay", "Chinese", "Korean", "Western"], 2100, ["Halal", "Non-Halal", "Vegetarian","Non-Halal/Non-Vegetarian"], (627, 200)],
"Pioneer Food Court": [20, 2.3, ["Thai", "Chinese"], 0000, ["Vegetarian", "Non-Halal/Non-Vegetarian"], (497, 561)],
"North Spine Food Court": [10, 2.5, ["Korean", "Japanese", "Chinese", "Western", "Malay"], 2100, ["Vegetarian", "Non-Halal/Non-Vegetarian"], (275, 293)],
"North Spine Plaza": [10, 4, ["Western", "Korean"], 2130, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (287, 339)],
"South Spine Food Court": [10, 2, ["Chinese", "Malay", "Korean", "Japanese", "Western"], 2100, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (227, 496)],
"Quad Cafe": [10, 2.4, ["Korean", "Chinese", "Indian", "Malay"], 2100, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (224, 351)],
"Coffee Bean": [20, 4, ["Western"], 2000, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (219, 389)],
"North Hill Food Court": [10, 3.8, ["Chinese", "Malay", "Indian"], 2100, ["Vegetarian", "Halal", "Non-Halal/Non-Vegetarian"], (720,314)]
}
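'''
● A quick sketch of how an entry is read and updated from the console
  (field order as documented above; the update values are illustrative only):
      hi_cost, lo_cost, cuisines, closing, prefs, coords = canteen_list["Food Court 1"]
      # cuisines -> ["Korean", "Japanese", "Western"], coords -> (442, 473)
      canteen_list["Coffee Bean"][3] = 2130    # e.g. push back a closing time
'''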
'''
###########################################
───╔╗───────────╔═╗─────╔╗─────╔╗─╔╗
───║║───────────║╔╝─────║║────╔╝╚╦╝╚╗
╔══╣║╔══╦══╦══╗╔╝╚╦══╦═╗║╚═╦╗╔╬╗╔╩╗╔╬══╦═╗
║╔═╣║║╔╗║══╣══╣╚╗╔╣╔╗║╔╝║╔╗║║║║║║─║║║╔╗║╔╗╗
║╚═╣╚╣╔╗╠══╠══║─║║║╚╝║║─║╚╝║╚╝║║╚╗║╚╣╚╝║║║║
╚══╩═╩╝╚╩══╩══╝─╚╝╚══╩╝─╚══╩══╝╚═╝╚═╩══╩╝╚╝
###########################################
● We had help from online tutorials to work out the UI button functionality.
● The tutorial code was tweaked a little while incorporating it into the project.
● ref: https://www.youtube.com/watch?v=4_9twnEduFA
'''
class button():
def __init__(self, colour, x, y, width, height, text=''):
self.colour = colour
self.x = x
self.y = y
self.width = width
self.height = height
self.text = text
def draw(self,win,outline = None):
if outline:
#draw a bigger rectangle behind to create a border
pygame.draw.rect(win, outline, (self.x-2, self.y-2, self.width+4, self.height+4),0)
#draws the button rectangle
pygame.draw.rect(win, self.colour, (self.x, self.y, self.width, self.height),0)
if self.text != '':
font = pygame.font.SysFont('calligrapher.ttf', 60)
text = font.render(self.text, 1, (0,0,0))
win.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))
    def isOver(self, pos):
        #pos is the mouse position (x,y) coordinates
        #True only when the position lies inside the button rectangle; the old
        #version fell through and returned None when only x was in range
        return (self.x < pos[0] < self.x + self.width and
                self.y < pos[1] < self.y + self.height)
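'''
● Tiny usage sketch (a hypothetical demo button, not one of the buttons defined below):
      demo = button((255,255,255), 10, 10, 120, 40, 'Demo')
      demo.draw(screen, (0,0,0))            # white button with a black border
      demo.isOver(pygame.mouse.get_pos())   # True while the cursor is on it
'''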
'''
##################################
─╔═╗─────────╔╗
─║╔╝────────╔╝╚╗
╔╝╚╦╗╔╦═╗╔══╬╗╔╬╦══╦═╗╔══╗
╚╗╔╣║║║╔╗╣╔═╝║║╠╣╔╗║╔╗╣══╣
─║║║╚╝║║║║╚═╗║╚╣║╚╝║║║╠══║
─╚╝╚══╩╝╚╩══╝╚═╩╩══╩╝╚╩══╝
##################################
╔═╗────────╔╗
║═╬═╦╦╗╔═╦╦╬╣
║╔╣╬║╔╝║╬║║║║
╚╝╚═╩╝─╠╗╠═╩╝
───────╚═╝
#################
● Most of the functions here draw out the different states that the screen can be in
● The redraw functions update the display for their respective transitory states
'''
#the 3 functions here control the Surface Text appearances
def text(text,win,x,y):
font = pygame.font.SysFont('freesansbold.ttf', 50)
phrase = font.render(text, 1, (0,0,0))
win.blit(phrase, (x,y))
def instructionText(text,win,x,y):
font = pygame.font.SysFont('Arial', 20)
phrase = font.render(text, 1, (0,0,0))
win.blit(phrase, (x,y))
def header(text,win,x,y):
font = pygame.font.SysFont('TimesNewRoman', 70)
phrase = font.render(text, 1, (0,0,0))
win.blit(phrase, (x,y))
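'''
● Usage sketch (illustrative coordinates): text("Hello", screen, 100, 50)
  draws 50pt body text; instructionText(...) gives 20pt captions and
  header(...) 70pt titles
'''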
def mouseClick(screen):
#checks for mouseclick event, and fetches corresp. positions
x,y = pygame.mouse.get_pos()
if (x >= 65 and x <=727) and (y >=82 and y <= 618):
#print(event.button)
pygame.draw.circle(screen, (255,0,150), (x,y), 15)
return True, x, y
else:
print("Out of bounds!")
return False, x, y
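'''
● Usage sketch: callers unpack the returned tuple, e.g.
      clicked, x, y = mouseClick(screen)
  clicked is False for clicks outside the 65..727 x 82..618 map area,
  in which case x and y are simply ignored
'''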
def skeleExit(win):
#exit event
aryadelight = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
win.blit(aryadelight,(0,0))
pygame.display.update()
xaxis = 100
for i in range(1,42):
image = str(i) + ".png"
skele = pygame.image.load(os.path.join(image))
win.blit(skele, (250,200))
text("Exiting...", win, (xaxis+20), 600)
pygame.display.update()
sleep(0.09)
def loading(win):
    #loading screen: cycle through the four frames three times,
    #sleeping 0.3 seconds before each subsequent frame
    for x in range(3):
        for name in ("load0.png", "load1.png", "load2.png", "load3.png"):
            frame = pygame.image.load(os.path.join(name))
            win.blit(frame, (0,0))
            pygame.display.update()
            sleep(0.3)
# ---------------------------------------------------------------------------#
def redrawMap(screen):
#draws the embedded NTU map image provided
NTUmap = pygame.image.load(os.path.join("NTUMap.jpg"))
screen.blit(NTUmap, (0,0))
    for x in range(50,900,50):
        #vertical grid lines
        pygame.draw.rect(screen, (255,0,0), (x, 0, 1, 700), 0)
    for y in range(50,700,50):
        #horizontal grid lines
        pygame.draw.rect(screen, (255,0,0), (0, y, 900, 1), 0)
text('Please click on your current location!',screen,200,100)
def redrawGPSMap(screen, top3, x, y):
#redraw NTU map, but this time with corresponding location coordinates
NTUmap = pygame.image.load(os.path.join("NTUMap.jpg"))
screen.blit(NTUmap, (0,0))
redGPS = pygame.image.load(os.path.join("redgps.png"))
screen.blit(redGPS, (x-16,y-32))
instructionText("You are currently at this position.", screen, x+4, y-10)
    #1st, 2nd and 3rd nearest canteens get a blue, black and yellow pin
    gps_icons = {1: "bluegps.png", 2: "blackgps.png", 3: "yellowgps.png"}
    counter = 1
    for i in top3:
        coor = canteen_list[i][5]
        icon_name = gps_icons.get(counter)
        if icon_name:
            icon = pygame.image.load(os.path.join(icon_name))
            screen.blit(icon, (coor[0]-12,coor[1]-24))
            instructionText(i, screen, coor[0]-24, coor[1])
        counter += 1
restartButton.draw(screen, (0,0,0))
def redrawMainWin(screen):
#functionality that controls what is displayed on the main window
aryadelight = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
screen.blit(aryadelight,(0,0))
mapButton.draw(screen, (0,0,0))
instructionText("(Choose your cuisines, preferences and budget for the meal here!)",screen,215,320)
predictButton.draw(screen, (0,0,0))
instructionText("(Find the nearest canteen!)",screen,132,470)
exitButton.draw(screen, (0,0,0))
ice = pygame.image.load(os.path.join("ice.png"))
screen.blit(ice, (500,670))
font = pygame.font.SysFont('verdana', 20)
creator = font.render("Made by HweeHean X Arya", 1, (0,0,200))
screen.blit(creator, (535,670))
def redrawCustWin(screen):
#controls what is displayed on the customisation window
bp = pygame.image.load(os.path.join("gradient.jpg"))
screen.blit(bp,(0,0))
instructionText('Left click again to reset!',screen,300,20)
text('Please select your food preference: ', screen, 100, 50)
halalButton.draw(screen, (0,0,0))
vegButton.draw(screen, (0,0,0))
nonhalalButton.draw(screen, (0,0,0))
text('Please select your cuisine type: ', screen, 100, 200)
koreanButton.draw(screen, (0,0,0))
malayButton.draw(screen, (0,0,0))
japanButton.draw(screen, (0,0,0))
chineseButton.draw(screen, (0,0,0))
indianButton.draw(screen, (0,0,0))
westernButton.draw(screen, (0,0,0))
text('Please select your maximum budget: ', screen, 100, 430)
button3.draw(screen, (0,0,0))
button5.draw(screen, (0,0,0))
button7.draw(screen, (0,0,0))
button9.draw(screen, (0,0,0))
nextButton.draw(screen, (0,0,0))
def redrawSearchWin(screen,x,y):
#gives the top 3 most relevant results for the prediction tab
bp = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
screen.blit(bp,(0,0))
GordonRamsay = pygame.image.load(os.path.join("GordonRamsay.png"))
screen.blit(GordonRamsay, (400,100))
    distList = list(canteen_list)
    print(distList)
    top3 = nearest_can(distList, x, y)
    print(top3)
    text("Nearest Canteen:",screen,110,400)
    #one photo per canteen; only the nearest (first) canteen's photo is shown
    canteen_pics = {"Food Court 1": "Canteen1.jpg", "Food Court 2": "Canteen2.png",
        "Food Court 4": "Canteen4.png", "Food Court 9": "Canteen9.png",
        "Food Court 11": "Canteen11.png", "Food Court 13": "Canteen13.png",
        "Food Court 14": "Canteen14.png", "Food Court 16": "Canteen16.png",
        "Tamarind Food Court": "Tamarind.jpg", "Pioneer Food Court": "Pioneer.png",
        "North Spine Food Court": "NorthSpine.jpg", "North Spine Plaza": "NorthSpinePlaza.jpg",
        "South Spine Food Court": "SouthSpineKoufuFoodCourt.png", "Quad Cafe": "Quad.jpg",
        "Coffee Bean": "Coffee.jpg", "North Hill Food Court": "NorthHill.jpg"}
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1 and k in canteen_pics:
            canteenPic = pygame.image.load(os.path.join(canteen_pics[k]))
            screen.blit(canteenPic, (150,200))
        text(str(canteenCount), screen, 110, yaxis)
        text(".", screen, 135, yaxis)
        text(k,screen,150,yaxis)
        canteenCount += 1
        yaxis += 70
return top3
def complicatedSearchWin(screen,top3):
#displays the top3 results for the end user after clicking customisation
bp = pygame.image.load(os.path.join("NTUFoodieRecsv1.png"))
screen.blit(bp,(0,0))
GordonRamsay = pygame.image.load(os.path.join("GordonRamsay.png"))
screen.blit(GordonRamsay, (400,100))
text("Nearest Canteen:",screen,110,400)
    #same lookup as in redrawSearchWin: photo shown for the nearest canteen
    canteen_pics = {"Food Court 1": "Canteen1.jpg", "Food Court 2": "Canteen2.png",
        "Food Court 4": "Canteen4.png", "Food Court 9": "Canteen9.png",
        "Food Court 11": "Canteen11.png", "Food Court 13": "Canteen13.png",
        "Food Court 14": "Canteen14.png", "Food Court 16": "Canteen16.png",
        "Tamarind Food Court": "Tamarind.jpg", "Pioneer Food Court": "Pioneer.png",
        "North Spine Food Court": "NorthSpine.jpg", "North Spine Plaza": "NorthSpinePlaza.jpg",
        "South Spine Food Court": "SouthSpineKoufuFoodCourt.png", "Quad Cafe": "Quad.jpg",
        "Coffee Bean": "Coffee.jpg", "North Hill Food Court": "NorthHill.jpg"}
    yaxis = 490
    canteenCount = 1
    for k in top3:
        if canteenCount == 1 and k in canteen_pics:
            canteenPic = pygame.image.load(os.path.join(canteen_pics[k]))
            screen.blit(canteenPic, (150,200))
        text(str(canteenCount), screen, 110, yaxis)
        text(".", screen, 135, yaxis)
        text(k,screen,150,yaxis)
        canteenCount += 1
        yaxis += 70
'''
╔═╗────╔═╗───╔╗╔╗
║═╬═╦╦╗║═╬═╦╦╣╚╬╬═╦╦═╗
║╔╣╬║╔╝╠═║╬║╔╣╔╣║║║║╬║
╚╝╚═╩╝─╚═╩═╩╝╚═╩╩╩═╬╗║
───────────────────╚═╝
###########################
● Functions below control how we do the sorting for the distance
and the different cuisines
'''
#function provided by ARYA
#function to compile a list of all the relevant food courts
def final_list(user_budget, user_cuisine, user_preference):
new_list = []
#Creating a list of all food courts that fit in the user's budget
for i in canteen_list:
if user_budget >= canteen_list[i][1]:
new_list.append(i)
#Creating a list of all food courts according to the imposed constraints on cuisine
for c in user_cuisine:
for i in canteen_list:
if c in canteen_list[i][2]:
new_list.append(i)
#Adding to the list, all the food courts according to the food preferences specified
for c in user_preference:
for i in canteen_list:
if c in canteen_list[i][4]:
new_list.append(i)
#eliminating all the repeated options
new_list = list(set(new_list))
#if new_list is empty due to no selection made
if len(new_list) == 0:
for i in canteen_list:
new_list.append(i)
    return new_list
#function to calculate the straight-line (Euclidean) distance from you to the proposed option
def calc_dis(x1, y1, x2, y2):
    #note: ** binds tighter than /, so '**1/2' merely halved the squared
    #distance; '**0.5' takes the actual square root
    return ((x1-x2)**2 + (y1-y2)**2)**0.5
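#e.g. calc_dis(0, 0, 3, 4) -> 5.0 (quick sanity check of the corrected formula)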
#function to find out the nearest suitable food outlet/food court
def nearest_can(new_list, x, y):
top3 = []
copy_list = new_list.copy()
while len(top3) != 3:
j = copy_list[0]
coor = canteen_list[j][5]
Min = calc_dis(x, y, coor[0], coor[1])
food_court = ''
for k in copy_list:
#coordinates of the food court
coor = canteen_list[k][5]
dist = calc_dis(x, y, coor[0], coor[1])
if Min >= dist:
Min = dist
food_court = k
index = copy_list.index(food_court)
copy_list.pop(index)
top3.append(food_court)
print(top3)
return top3
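# Note: nearest_can assumes new_list holds at least three entries, otherwise
# copy_list empties and copy_list[0] raises IndexError; the '>=' comparison
# guarantees food_court is assigned on the first pass, since Min starts as
# the distance to copy_list[0] itself.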
'''
#########################
╔╗─────╔╗─╔╗
║║────╔╝╚╦╝╚╗
║╚═╦╗╔╬╗╔╩╗╔╬══╦═╗╔══╗
║╔╗║║║║║║─║║║╔╗║╔╗╣══╣
║╚╝║╚╝║║╚╗║╚╣╚╝║║║╠══║
╚══╩══╝╚═╝╚═╩══╩╝╚╩══╝
#########################
● This is where the buttons are defined, using the button class
● They are relatively self-explanatory
'''
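# The button class (defined earlier in this file) is assumed to take
# (colour, x, y, width, height, text) and to expose draw(screen, outline),
# isOver(pos) and a mutable .colour, matching how the instances below are used.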
#buttons for the main loading page:
mapButton = button((255,255,255), 200, 250, 500, 100, 'Canteen Customisation')
predictButton = button((255,255,255), 100, 400, 300, 100, 'Prediction')
exitButton = button((255,255,255), 500, 400, 300, 100, 'Exit')
#buttons for the customisation screen:
halalButton = button((255,255,255), 50, 120, 250, 50, 'Halal')
vegButton = button((255,255,255), 320, 120, 250, 50, 'Vegetarian')
nonhalalButton = button((255,255,255), 590, 120, 250, 50, 'Non-Halal')
koreanButton = button((255,255,255), 50, 270, 250, 50, 'Korean')
malayButton = button((255,255,255), 320, 270, 250, 50, 'Malay')
japanButton = button((255,255,255), 590, 270, 250, 50, 'Japanese')
chineseButton = button((255,255,255), 50, 340, 250, 50, 'Chinese')
indianButton = button((255,255,255), 320, 340, 250, 50, 'Indian')
westernButton = button((255,255,255), 590, 340, 250, 50, 'Western')
button3 = button((255,255,255), 235, 490, 70, 50, '$3')
button5 = button((255,255,255), 355, 490, 70, 50, '$5')
button7 = button((255,255,255), 475, 490, 70, 50, '$7')
button9 = button((255,255,255), 595, 490, 70, 50, '$10')
nextButton = button((255,255,255), 730, 580, 120, 70, 'Next')
#buttons to showcase GPS:
gpsButton = button((255,255,255), 700, 600, 170, 50, 'to Map')
restartButton = button((255,255,255), 700, 600, 190, 50, 'Restart?')
'''
#############################
────╔╗────╔╗
───╔╝╚╗──╔╝╚╗
╔══╬╗╔╬══╬╗╔╬══╦══╗
║══╣║║║╔╗║║║║║═╣══╣
╠══║║╚╣╔╗║║╚╣║═╬══║
╚══╝╚═╩╝╚╝╚═╩══╩══╝
#############################
● Since I'm only using one while loop and all the functions are in here,
it is important to note that none of the "if" statements interfere with
each other
● Each boolean acts like a flip-flop, storing one of the different STATES
'''
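# A minimal sketch of the flip-flop idea (hypothetical names, not part of the
# programme):
#     pressed = False
#     if clicked:
#         pressed = not pressed   # toggle between the two states
# Each boolean below plays this role for one button or one screen.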
#original state of customisation buttons
halalButtonPressed = False
vegButtonPressed = False
nonhalalButtonPressed = False
koreanButtonPressed = False
malayButtonPressed = False
japanButtonPressed = False
chineseButtonPressed = False
indianButtonPressed = False
westernButtonPressed = False
button3Pressed = False
button5Pressed = False
button7Pressed = False
button9Pressed = False
nextButtonPressed = False
gpsButtonPressed = False
#original state of events
checkButton = True
mapCoor = False
customisationMenu = False
mapCoor2 = False
easySearch = False
complicatedMenu = False
oneTime = True
'''
####################################
╔═╗╔═╗───────╔═══╗
║║╚╝║║───────║╔═╗║
║╔╗╔╗╠══╦╦═╗─║╚═╝╠═╦══╦══╦═╦══╦╗╔╗
║║║║║║╔╗╠╣╔╗╗║╔══╣╔╣╔╗║╔╗║╔╣╔╗║╚╝║
║║║║║║╔╗║║║║║║║──║║║╚╝║╚╝║║║╔╗║║║║
╚╝╚╝╚╩╝╚╩╩╝╚╝╚╝──╚╝╚══╩═╗╠╝╚╝╚╩╩╩╝
──────────────────────╔═╝║
──────────────────────╚══╝
####################################
● It relies on the predefined states above, turned on and off to display
  multiple things without them interfering with each other's functionality
● e.g. clicking the customisation button disables its own flag, so
  if the mouse is clicked over the same area again, it will not
  be re-activated.
● This is very important for a smooth flow.
● Some debugging messages are also left in the console to help
  understand what is going on behind the scenes
'''
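# Dispatch sketch (my reading, hedged): at most one of the redraw flags
# (checkButton / customisationMenu / easySearch / complicatedMenu /
# gpsButtonPressed) is True at a time, so each redraw*() call below owns the
# screen for its own state; while mapCoor/mapCoor2 are set, none of them is
# True and the map simply stays on screen.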
pygame.init()
run = True
clock = pygame.time.Clock()
#start the pygame programme
while run:
#if true, redraws the main window
if checkButton:
redrawMainWin(screen)
#if true, redraws the customisation window
if customisationMenu:
redrawCustWin(screen)
if easySearch:
if oneTime:
nearest_canteen = redrawSearchWin(screen, x, y)
sleep(2)
oneTime = False
gpsButton.draw(screen, (0,0,0))
    #if true, redraws the complicated customisation results
if complicatedMenu:
if oneTime:
complicatedSearchWin(screen, nearest_canteen)
sleep(2)
oneTime = False
gpsButton.draw(screen, (0,0,0))
    #redraws the GPS map, with point locators indicated
if gpsButtonPressed == True:
redrawGPSMap(screen, nearest_canteen, x, y)
pygame.display.update()
clock.tick(30)
#checks event
for event in pygame.event.get():
#Fetches the mouse position
pos = pygame.mouse.get_pos()
#Quits the pygame programme
if event.type == pygame.QUIT:
run = False
pygame.quit()
if gpsButtonPressed:
if event.type == pygame.MOUSEBUTTONDOWN:
if restartButton.isOver(pos):
restartButton.colour = (50,50,50)
restartButton.draw(screen, (0,0,0))
pygame.display.update()
print('clicked the restart button')
#original state of customisation buttons
halalButtonPressed = False
vegButtonPressed = False
nonhalalButtonPressed = False
koreanButtonPressed = False
malayButtonPressed = False
japanButtonPressed = False
chineseButtonPressed = False
indianButtonPressed = False
westernButtonPressed = False
button3Pressed = False
button5Pressed = False
button7Pressed = False
button9Pressed = False
nextButtonPressed = False
gpsButtonPressed = False
#original state of events
checkButton = True
mapCoor = False
customisationMenu = False
mapCoor2 = False
easySearch = False
complicatedMenu = False
oneTime = True
if event.type == pygame.MOUSEMOTION:
if restartButton.isOver(pos):
restartButton.colour = (0,255,0)
continue
else:
restartButton.colour = (255,255,255)
continue
if easySearch == True or complicatedMenu == True:
if event.type == pygame.MOUSEBUTTONDOWN:
if gpsButton.isOver(pos):
gpsButton.colour = (50,50,50)
gpsButton.draw(screen, (0,0,0))
pygame.display.update()
print('clicked gps button')
gpsButtonPressed = True
easySearch = False
complicatedMenu = False
continue
if event.type == pygame.MOUSEMOTION:
if gpsButton.isOver(pos):
gpsButton.colour = (0,255,0)
continue
else:
gpsButton.colour = (255,255,255)
continue
#if mouse is clicked over buttons (main page)
if checkButton:
if event.type == pygame.MOUSEBUTTONDOWN:
if mapButton.isOver(pos):
mapButton.colour = (0,255,0)
redrawMainWin(screen)
pygame.display.update()
print('clicked map button')
sleep(0.5)
redrawMap(screen)
checkButton = False
mapCoor = True
continue
if predictButton.isOver(pos):
predictButton.colour = (0,255,0)
redrawMainWin(screen)
pygame.display.update()
print('clicked predict button')
sleep(0.5)
redrawMap(screen)
checkButton = False
mapCoor2 = True
continue
if exitButton.isOver(pos):
exitButton.colour = (0,255,0)
print('Exiting...')
skeleExit(screen)
pygame.quit()
run = False
exit()
#if mouse hovered over the button (main page)
if event.type == pygame.MOUSEMOTION:
if mapButton.isOver(pos):
mapButton.colour = (255,0,0)
else:
mapButton.colour = (255,255,255)
if predictButton.isOver(pos):
predictButton.colour = (255,0,0)
else:
predictButton.colour = (255,255,255)
if exitButton.isOver(pos):
exitButton.colour = (255,0,0)
else:
exitButton.colour = (255,255,255)
#clicking buttons in the customisation menu:
if customisationMenu:
if event.type == pygame.MOUSEMOTION:
if nextButton.isOver(pos):
nextButton.colour = (0,0,255)
else:
nextButton.colour = (255,255,255)
continue
if event.type == pygame.MOUSEBUTTONDOWN:
#clicking on next button
if nextButton.isOver(pos):
nextButton.colour = (255,255,0)
nextButtonPressed = True
customisationMenu = False
continue
if halalButton.isOver(pos):
if halalButtonPressed == False:
if nonhalalButtonPressed:
nonhalalButton.colour = (255,255,255)
nonhalalButtonPressed = False
halalButton.colour = (0,255,0)
print('clicked Halal button')
halalButtonPressed = True
continue
else:
halalButton.colour = (255,255,255)
halalButtonPressed = False
continue
if vegButton.isOver(pos):
if vegButtonPressed == False:
if nonhalalButtonPressed:
nonhalalButton.colour = (255,255,255)
nonhalalButtonPressed = False
vegButton.colour = (0,255,0)
print('clicked Vegetarian button')
vegButtonPressed = True
continue
else:
vegButton.colour = (255,255,255)
vegButtonPressed = False
continue
if nonhalalButton.isOver(pos):
if nonhalalButtonPressed == False:
if halalButtonPressed:
halalButton.colour = (255,255,255)
halalButtonPressed = False
if vegButtonPressed:
vegButton.colour = (255,255,255)
vegButtonPressed = False
nonhalalButton.colour = (0,255,0)
print('clicked non-halal button')
nonhalalButtonPressed = True
continue
else:
nonhalalButton.colour = (255,255,255)
nonhalalButtonPressed = False
if koreanButton.isOver(pos):
if koreanButtonPressed == False:
koreanButton.colour = (0,255,0)
print('clicked korean button')
koreanButtonPressed = True
continue
else:
koreanButton.colour = (255,255,255)
koreanButtonPressed = False
if malayButton.isOver(pos):
if malayButtonPressed == False:
malayButton.colour = (0,255,0)
print('clicked Malay button')
malayButtonPressed = True
continue
else:
malayButton.colour = (255,255,255)
malayButtonPressed = False
if japanButton.isOver(pos):
if japanButtonPressed == False:
japanButton.colour = (0,255,0)
print('clicked japan button')
japanButtonPressed = True
continue
else:
japanButton.colour = (255,255,255)
japanButtonPressed = False
if chineseButton.isOver(pos):
if chineseButtonPressed == False:
chineseButton.colour = (0,255,0)
print('clicked chinese button')
chineseButtonPressed = True
continue
else:
chineseButton.colour = (255,255,255)
chineseButtonPressed = False
if indianButton.isOver(pos):
if indianButtonPressed == False:
indianButton.colour = (0,255,0)
print('clicked indian button')
indianButtonPressed = True
continue
else:
indianButton.colour = (255,255,255)
indianButtonPressed = False
if westernButton.isOver(pos):
if westernButtonPressed == False:
westernButton.colour = (0,255,0)
print('clicked western button')
westernButtonPressed = True
continue
else:
westernButton.colour = (255,255,255)
westernButtonPressed = False
if button3.isOver(pos):
if button3Pressed == False:
if button5Pressed == True:
button5.colour = (255,255,255)
button5Pressed = False
if button7Pressed == True:
button7.colour = (255,255,255)
button7Pressed = False
if button9Pressed == True:
button9.colour = (255,255,255)
button9Pressed = False
button3.colour = (0,255,0)
print('clicked $3')
button3Pressed = True
continue
else:
button3.colour = (255,255,255)
button3Pressed = False
if button5.isOver(pos):
if button5Pressed == False:
if button3Pressed == True:
button3.colour = (255,255,255)
button3Pressed = False
if button7Pressed == True:
button7.colour = (255,255,255)
button7Pressed = False
if button9Pressed == True:
button9.colour = (255,255,255)
button9Pressed = False
button5.colour = (0,255,0)
print('Clicked $5')
button5Pressed = True
continue
else:
button5.colour = (255,255,255)
button5Pressed = False
if button7.isOver(pos):
if button7Pressed == False:
if button3Pressed == True:
button3.colour = (255,255,255)
button3Pressed = False
if button5Pressed == True:
button5.colour = (255,255,255)
button5Pressed = False
if button9Pressed == True:
button9.colour = (255,255,255)
button9Pressed = False
button7.colour = (0,255,0)
print('Clicked $7')
button7Pressed = True
continue
else:
button7.colour = (255,255,255)
button7Pressed = False
if button9.isOver(pos):
if button9Pressed == False:
if button3Pressed == True:
button3.colour = (255,255,255)
button3Pressed = False
if button5Pressed == True:
button5.colour = (255,255,255)
button5Pressed = False
if button7Pressed == True:
button7.colour = (255,255,255)
button7Pressed = False
button9.colour = (0,255,0)
print('Clicked $10')
button9Pressed = True
continue
else:
button9.colour = (255,255,255)
button9Pressed = False
#if mousebuttondown and map is already displayed
if mapCoor == True and event.type == pygame.MOUSEBUTTONDOWN:
mouseclick = mouseClick(screen)
if mouseclick[0]:
pygame.display.update()
x = mouseclick[1]
y = mouseclick[2]
print(x, ',', y)
#pygame.time.delay(2000)
mapCoor = False
sleep(1)
customisationMenu = True
#if prediction button is clicked
if mapCoor2 == True and event.type == pygame.MOUSEBUTTONDOWN:
mouseclick = mouseClick(screen)
if mouseclick[0]:
pygame.display.update()
x = mouseclick[1]
y = mouseclick[2]
print(x, ',', y)
#pygame.time.delay(2000)
mapCoor2 = False
sleep(1)
loading(screen)
easySearch = True
#things that happen after the next button is pressed
if nextButtonPressed:
sleep(1)
loading(screen)
user_prefList = []
user_cuisineList = []
user_budget = 0
if halalButtonPressed:
user_prefList.append("Halal")
if vegButtonPressed:
user_prefList.append("Vegetarian")
if nonhalalButtonPressed:
user_prefList.append("Non-Halal/Non-Vegetarian")
if koreanButtonPressed:
user_cuisineList.append("Korean")
if malayButtonPressed:
user_cuisineList.append("Malay")
if japanButtonPressed:
user_cuisineList.append("Japanese")
if chineseButtonPressed:
user_cuisineList.append("Chinese")
if indianButtonPressed:
user_cuisineList.append("Indian")
if westernButtonPressed:
user_cuisineList.append("Western")
if button3Pressed:
user_budget = 3
if button5Pressed:
user_budget = 5
if button7Pressed:
user_budget = 7
if button9Pressed:
user_budget = 9
#debug
print(user_cuisineList)
print(user_prefList)
print(user_budget)
#continue#
finalID = final_list(user_budget, user_cuisineList, user_prefList)
print(finalID)
nearest_canteen = nearest_can(finalID, x, y)
print(nearest_canteen)
sleep(1)
nextButtonPressed = False
complicatedMenu = True
|
[
11,
12,
15,
22,
23
] |
2,126 |
ee91e8c9dcb940882733b2d23b74a76d0392f4fe
|
<mask token>
class TypeDirPath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return PathParamWidget(delegate, parent=parent)
<mask token>
@classmethod
def value(cls, control):
return control.text()
<mask token>
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ''
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
<mask token>
class TypeRelDirPath(TypeDirPath):
@classmethod
def create(cls, params):
return cls(params)
def __init__(self, params):
self.relpath = params.get('relpath', '.')
def control(self, delegate, property_item, parent):
        return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent)
def default(self, path):
self.relpath = path or '.'
return '.'
def set_link(self, value):
self.relpath = value or '.'
def filter(self, value):
if not value:
return '.'
try:
if os.path.isabs(value):
return os.path.relpath(value, self.relpath)
else:
return value
except ValueError:
return '.'
class TypeChoice(TypeBase):
@classmethod
def create(cls, params):
return cls(params.get('choices', []))
def __init__(self, choices):
self.selects = []
self._data_dict = {}
self.setup_choices(choices)
def setup_choices(self, choices):
self.selects = []
for item in choices:
if isinstance(item, string_types):
item = {'text': item, 'value': item}
self.selects.append(item)
self._data_dict = {item['value']: item for item in self.selects}
def control(self, delegate, property_item, parent):
combo = QComboBox(parent)
self.setup_combo_box(combo)
return combo
def setup_combo_box(self, combo):
for i, item in enumerate(self.selects):
combo.addItem(item['text'])
combo.setItemData(i, item['value'])
if 'icon' in item:
combo.setItemIcon(i, item['icon'])
@staticmethod
def set_value(combo, value):
index = combo.findData(value)
combo.setCurrentIndex(index)
@classmethod
def value(cls, combo):
return combo.itemData(combo.currentIndex())
def data(self, value):
        return self._data_dict[value]['text'] if value in self._data_dict else None
def icon(self, value):
try:
            return self._data_dict[value]['icon'] if value in self._data_dict else None
except KeyError:
return None
|
<mask token>
class TypeBool(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
combo = QComboBox(parent)
combo.addItem('Yes')
combo.addItem('No')
return combo
<mask token>
<mask token>
<mask token>
class CheckBox(QCheckBox):
def __init__(self, item, parent):
super(CheckBox, self).__init__(parent)
self.item = item
self.stateChanged.connect(self.on_state_changed)
def on_state_changed(self, state):
self.item.set_value(state == Qt.Checked, force_update=True)
class TypeCheck(TypeBase):
is_persistent_editor = True
@classmethod
def control(cls, delegate, property_item, parent):
check = CheckBox(property_item, parent)
return check
@classmethod
def set_value(cls, control, value):
control.setCheckState(Qt.Checked if value else Qt.Unchecked)
@classmethod
def value(cls, control):
return control.isChecked()
class TypeFilePath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return FilePathWidget(delegate, property_item.params, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ''
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeDirPath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return PathParamWidget(delegate, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ''
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeRelDirPath(TypeDirPath):
@classmethod
def create(cls, params):
return cls(params)
def __init__(self, params):
self.relpath = params.get('relpath', '.')
def control(self, delegate, property_item, parent):
        return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent)
def default(self, path):
self.relpath = path or '.'
return '.'
def set_link(self, value):
self.relpath = value or '.'
def filter(self, value):
if not value:
return '.'
try:
if os.path.isabs(value):
return os.path.relpath(value, self.relpath)
else:
return value
except ValueError:
return '.'
class TypeChoice(TypeBase):
@classmethod
def create(cls, params):
return cls(params.get('choices', []))
def __init__(self, choices):
self.selects = []
self._data_dict = {}
self.setup_choices(choices)
def setup_choices(self, choices):
self.selects = []
for item in choices:
if isinstance(item, string_types):
item = {'text': item, 'value': item}
self.selects.append(item)
self._data_dict = {item['value']: item for item in self.selects}
def control(self, delegate, property_item, parent):
combo = QComboBox(parent)
self.setup_combo_box(combo)
return combo
def setup_combo_box(self, combo):
for i, item in enumerate(self.selects):
combo.addItem(item['text'])
combo.setItemData(i, item['value'])
if 'icon' in item:
combo.setItemIcon(i, item['icon'])
@staticmethod
def set_value(combo, value):
index = combo.findData(value)
combo.setCurrentIndex(index)
@classmethod
def value(cls, combo):
return combo.itemData(combo.currentIndex())
def data(self, value):
        return self._data_dict[value]['text'] if value in self._data_dict else None
def icon(self, value):
try:
            return self._data_dict[value]['icon'] if value in self._data_dict else None
except KeyError:
return None
|
<mask token>
class TypeBool(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
combo = QComboBox(parent)
combo.addItem('Yes')
combo.addItem('No')
return combo
<mask token>
@classmethod
def value(cls, control):
return control.currentIndex() == 0
@staticmethod
def data(value):
return 'Yes' if value else 'No'
class CheckBox(QCheckBox):
def __init__(self, item, parent):
super(CheckBox, self).__init__(parent)
self.item = item
self.stateChanged.connect(self.on_state_changed)
def on_state_changed(self, state):
self.item.set_value(state == Qt.Checked, force_update=True)
class TypeCheck(TypeBase):
is_persistent_editor = True
@classmethod
def control(cls, delegate, property_item, parent):
check = CheckBox(property_item, parent)
return check
@classmethod
def set_value(cls, control, value):
control.setCheckState(Qt.Checked if value else Qt.Unchecked)
@classmethod
def value(cls, control):
return control.isChecked()
class TypeFilePath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return FilePathWidget(delegate, property_item.params, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ''
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeDirPath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return PathParamWidget(delegate, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ''
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeRelDirPath(TypeDirPath):
@classmethod
def create(cls, params):
return cls(params)
def __init__(self, params):
self.relpath = params.get('relpath', '.')
def control(self, delegate, property_item, parent):
        return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent)
def default(self, path):
self.relpath = path or '.'
return '.'
def set_link(self, value):
self.relpath = value or '.'
def filter(self, value):
if not value:
return '.'
try:
if os.path.isabs(value):
return os.path.relpath(value, self.relpath)
else:
return value
except ValueError:
return '.'
class TypeChoice(TypeBase):
@classmethod
def create(cls, params):
return cls(params.get('choices', []))
def __init__(self, choices):
self.selects = []
self._data_dict = {}
self.setup_choices(choices)
def setup_choices(self, choices):
self.selects = []
for item in choices:
if isinstance(item, string_types):
item = {'text': item, 'value': item}
self.selects.append(item)
self._data_dict = {item['value']: item for item in self.selects}
def control(self, delegate, property_item, parent):
combo = QComboBox(parent)
self.setup_combo_box(combo)
return combo
def setup_combo_box(self, combo):
for i, item in enumerate(self.selects):
combo.addItem(item['text'])
combo.setItemData(i, item['value'])
if 'icon' in item:
combo.setItemIcon(i, item['icon'])
@staticmethod
def set_value(combo, value):
index = combo.findData(value)
combo.setCurrentIndex(index)
@classmethod
def value(cls, combo):
return combo.itemData(combo.currentIndex())
def data(self, value):
        return self._data_dict[value]['text'] if value in self._data_dict else None
def icon(self, value):
try:
            return self._data_dict[value]['icon'] if value in self._data_dict else None
except KeyError:
return None
|
<mask token>
class TypeBase(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class TypeBool(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
combo = QComboBox(parent)
combo.addItem('Yes')
combo.addItem('No')
return combo
@classmethod
def set_value(cls, control, value):
control.setCurrentIndex(0 if value else 1)
@classmethod
def value(cls, control):
return control.currentIndex() == 0
@staticmethod
def data(value):
return 'Yes' if value else 'No'
class CheckBox(QCheckBox):
def __init__(self, item, parent):
super(CheckBox, self).__init__(parent)
self.item = item
self.stateChanged.connect(self.on_state_changed)
def on_state_changed(self, state):
self.item.set_value(state == Qt.Checked, force_update=True)
class TypeCheck(TypeBase):
is_persistent_editor = True
@classmethod
def control(cls, delegate, property_item, parent):
check = CheckBox(property_item, parent)
return check
@classmethod
def set_value(cls, control, value):
control.setCheckState(Qt.Checked if value else Qt.Unchecked)
@classmethod
def value(cls, control):
return control.isChecked()
class TypeFilePath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return FilePathWidget(delegate, property_item.params, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ''
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeDirPath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return PathParamWidget(delegate, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ''
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeRelDirPath(TypeDirPath):
@classmethod
def create(cls, params):
return cls(params)
def __init__(self, params):
self.relpath = params.get('relpath', '.')
def control(self, delegate, property_item, parent):
        return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent)
def default(self, path):
self.relpath = path or '.'
return '.'
def set_link(self, value):
self.relpath = value or '.'
def filter(self, value):
if not value:
return '.'
try:
if os.path.isabs(value):
return os.path.relpath(value, self.relpath)
else:
return value
except ValueError:
return '.'
class TypeChoice(TypeBase):
@classmethod
def create(cls, params):
return cls(params.get('choices', []))
def __init__(self, choices):
self.selects = []
self._data_dict = {}
self.setup_choices(choices)
def setup_choices(self, choices):
self.selects = []
for item in choices:
if isinstance(item, string_types):
item = {'text': item, 'value': item}
self.selects.append(item)
self._data_dict = {item['value']: item for item in self.selects}
def control(self, delegate, property_item, parent):
combo = QComboBox(parent)
self.setup_combo_box(combo)
return combo
def setup_combo_box(self, combo):
for i, item in enumerate(self.selects):
combo.addItem(item['text'])
combo.setItemData(i, item['value'])
if 'icon' in item:
combo.setItemIcon(i, item['icon'])
@staticmethod
def set_value(combo, value):
index = combo.findData(value)
combo.setCurrentIndex(index)
@classmethod
def value(cls, combo):
return combo.itemData(combo.currentIndex())
def data(self, value):
        return self._data_dict[value]['text'] if value in self._data_dict else None
def icon(self, value):
try:
            return self._data_dict[value]['icon'] if value in self._data_dict else None
except KeyError:
return None
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import, unicode_literals
import os
from qtpy.QtCore import *
# from qtpy.QtGui import *
from qtpy.QtWidgets import *
from six import string_types
from ..widgets import PathParamWidget, RelPathParamWidget, FilePathWidget
class TypeBase(object):
@classmethod
def create(cls, _):
"""
Create instance or return class
"""
return cls
@classmethod
def control(cls, delegate, property_item, parent):
return None
@staticmethod
def data(value):
"""
return item's data() value
"""
return value
@classmethod
def value(cls, control):
return None
@staticmethod
def icon(_):
return None
@classmethod
def height(cls):
return -1
@classmethod
def default(cls, value):
return value
@classmethod
def filter(cls, value):
return value
@classmethod
def set_link(cls, value):
pass
@classmethod
def link_value(cls, default_value, link_value):
return link_value or default_value
@classmethod
def sizeHint(cls):
return QSize(-1, -1)
@classmethod
def setup(cls, item):
pass
@classmethod
def set_value(cls, control, value):
control.setText(value)
is_persistent_editor = False
class TypeBool(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
combo = QComboBox(parent)
combo.addItem("Yes")
combo.addItem("No")
return combo
@classmethod
def set_value(cls, control, value):
control.setCurrentIndex(0 if value else 1)
@classmethod
def value(cls, control):
return control.currentIndex() == 0
@staticmethod
def data(value):
return "Yes" if value else "No"
class CheckBox(QCheckBox):
def __init__(self, item, parent):
super(CheckBox, self).__init__(parent)
self.item = item
# noinspection PyUnresolvedReferences
self.stateChanged.connect(self.on_state_changed)
def on_state_changed(self, state):
self.item.set_value(state == Qt.Checked, force_update=True)
class TypeCheck(TypeBase):
is_persistent_editor = True
@classmethod
def control(cls, delegate, property_item, parent):
check = CheckBox(property_item, parent)
return check
@classmethod
def set_value(cls, control, value):
# type: (QCheckBox, bool) -> None
control.setCheckState(Qt.Checked if value else Qt.Unchecked)
@classmethod
def value(cls, control):
# type: (QCheckBox) -> bool
return control.isChecked()
class TypeFilePath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return FilePathWidget(delegate, property_item.params, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ""
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeDirPath(TypeBase):
@classmethod
def control(cls, delegate, property_item, parent):
return PathParamWidget(delegate, parent=parent)
@classmethod
def set_value(cls, control, value):
control.setText(value)
@classmethod
def value(cls, control):
return control.text()
@classmethod
def filter(cls, value):
return os.path.normpath(value) if value else value
@classmethod
def link_value(cls, default_value, link_value):
if default_value is None and link_value is None:
return ""
if link_value is None:
return default_value
if default_value is None:
return link_value
return os.path.join(default_value, link_value)
@classmethod
def sizeHint(cls):
return QSize(-1, 28)
class TypeRelDirPath(TypeDirPath):
@classmethod
def create(cls, params):
return cls(params)
def __init__(self, params):
self.relpath = params.get("relpath", ".")
def control(self, delegate, property_item, parent):
return RelPathParamWidget(delegate, relpath=self.relpath, parent=parent)
def default(self, path):
self.relpath = path or "."
return "."
def set_link(self, value):
self.relpath = value or "."
def filter(self, value):
if not value:
return "."
try:
if os.path.isabs(value):
return os.path.relpath(value, self.relpath)
else:
return value
except ValueError:
return "."
# noinspection PyArgumentList
class TypeChoice(TypeBase):
@classmethod
def create(cls, params):
return cls(params.get("choices", []))
def __init__(self, choices):
self.selects = []
self._data_dict = {}
self.setup_choices(choices)
def setup_choices(self, choices):
self.selects = []
for item in choices:
if isinstance(item, string_types):
item = {
"text": item,
"value": item,
}
self.selects.append(item)
self._data_dict = {item["value"]: item for item in self.selects}
def control(self, delegate, property_item, parent):
combo = QComboBox(parent)
self.setup_combo_box(combo)
return combo
def setup_combo_box(self, combo):
for i, item in enumerate(self.selects):
combo.addItem(item["text"])
combo.setItemData(i, item["value"])
if "icon" in item:
combo.setItemIcon(i, item["icon"])
# noinspection PyMethodOverriding
@staticmethod
def set_value(combo, value):
# type: (QComboBox, str) -> None
index = combo.findData(value)
combo.setCurrentIndex(index)
@classmethod
def value(cls, combo):
# type: (QComboBox, str) -> None
return combo.itemData(combo.currentIndex())
# noinspection PyMethodOverriding
def data(self, value):
return self._data_dict[value]["text"] if value in self._data_dict else None
# noinspection PyMethodOverriding
def icon(self, value):
try:
return self._data_dict[value]["icon"] if value in self._data_dict else None
except KeyError:
return None
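# Example (hedged): TypeChoice.create({"choices": ["a", {"text": "B", "value": "b"}]})
# normalises the bare string into {"text": "a", "value": "a"}, so every entry in
# self.selects exposes text/value (plus an optional icon) for the QComboBox.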
|
[
21,
41,
43,
45,
61
] |
2,127 |
8f1ec65ca60605747f46f596e0b5848922bcd0b5
|
<mask token>
|
<mask token>
for group in groups:
allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))
answers = set()
people = group.split('\n')
for person in people:
allananswers = allananswers & set(list(person))
for answer in person:
if answer not in answers:
answers.add(answer)
count = count + 1
groupanswers.append(allananswers)
print(count)
<mask token>
for group in groupanswers:
answer2 = answer2 + len(group)
print(answer2)
|
<mask token>
groups = input.split('\n\n')
count = 0
groupanswers = []
for group in groups:
allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))
answers = set()
people = group.split('\n')
for person in people:
allananswers = allananswers & set(list(person))
for answer in person:
if answer not in answers:
answers.add(answer)
count = count + 1
groupanswers.append(allananswers)
print(count)
answer2 = 0
for group in groupanswers:
answer2 = answer2 + len(group)
print(answer2)
|
from day6input import *
groups = input.split('\n\n')
count = 0
groupanswers = []
for group in groups:
allananswers = set(list('abcdefghijklmnopqrstuvwxyz'))
answers = set()
people = group.split('\n')
for person in people:
allananswers = allananswers & set(list(person))
for answer in person:
if answer not in answers:
answers.add(answer)
count = count + 1
groupanswers.append(allananswers)
print(count)
answer2 = 0
for group in groupanswers:
answer2 = answer2 + len(group)
print(answer2)
|
from day6input import *
groups = input.split('\n\n')
count = 0 #1
groupanswers = [] #2
for group in groups:
allananswers = set(list('abcdefghijklmnopqrstuvwxyz')) #2
answers = set() #1
people = group.split('\n')
for person in people:
allananswers = allananswers & set(list(person)) #2
#1
for answer in person:
if answer not in answers:
answers.add(answer)
count = count + 1
groupanswers.append(allananswers) #2
print(count) #1
#####2
answer2 = 0
for group in groupanswers:
answer2 = answer2 + len(group)
print(answer2)
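# Worked example (hedged): for a group "ab\nbc", the per-person union
# {'a', 'b', 'c'} adds 3 to count (part 1), while the running intersection
# allananswers shrinks to {'b'}, adding 1 to answer2 (part 2).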
|
[
0,
1,
2,
3,
4
] |
2,128 |
542bd52e3d5bc79077277034234419983005f78e
|
<mask token>
class OrderClient(PayPalClient):
""" This is the sample function to create an order. It uses the
JSON body returned by buildRequestBody() to create an order."""
def create_order(self, order_body, debug=False):
request = OrdersCreateRequest()
request.prefer('return=representation')
request.request_body(order_body)
response = self.client.execute(request)
if debug:
print('Status Code: ', response.status_code)
print('Status: ', response.result.status)
print('Order ID: ', response.result.id)
print('Intent: ', response.result.intent)
print('Links:')
for link in response.result.links:
print('\t{}: {}\tCall Type: {}'.format(link.rel, link.href,
link.method))
            print('Total Amount: {} {}'.format(
                response.result.purchase_units[0].amount.currency_code,
                response.result.purchase_units[0].amount.value))
return response
def capture_order(self, token, debug=False):
request = OrdersCaptureRequest(token)
try:
response = self.client.execute(request)
order_id = response.result.id
return order_id
except IOError as ioe:
return 0
|
<mask token>
class PayPalClient:
<mask token>
<mask token>
<mask token>
<mask token>
class OrderClient(PayPalClient):
""" This is the sample function to create an order. It uses the
JSON body returned by buildRequestBody() to create an order."""
def create_order(self, order_body, debug=False):
request = OrdersCreateRequest()
request.prefer('return=representation')
request.request_body(order_body)
response = self.client.execute(request)
if debug:
print('Status Code: ', response.status_code)
print('Status: ', response.result.status)
print('Order ID: ', response.result.id)
print('Intent: ', response.result.intent)
print('Links:')
for link in response.result.links:
print('\t{}: {}\tCall Type: {}'.format(link.rel, link.href,
link.method))
            print('Total Amount: {} {}'.format(
                response.result.purchase_units[0].amount.currency_code,
                response.result.purchase_units[0].amount.value))
return response
def capture_order(self, token, debug=False):
request = OrdersCaptureRequest(token)
try:
response = self.client.execute(request)
order_id = response.result.id
return order_id
except IOError as ioe:
return 0
|
<mask token>
class PayPalClient:
<mask token>
<mask token>
def array_to_json_array(self, json_array):
result = []
if isinstance(json_array, list):
for item in json_array:
                result.append(self.object_to_json(item) if not self.is_primittive(item)
                              else self.array_to_json_array(item) if isinstance(item, list)
                              else item)
return result
<mask token>
class OrderClient(PayPalClient):
""" This is the sample function to create an order. It uses the
JSON body returned by buildRequestBody() to create an order."""
def create_order(self, order_body, debug=False):
request = OrdersCreateRequest()
request.prefer('return=representation')
request.request_body(order_body)
response = self.client.execute(request)
if debug:
print('Status Code: ', response.status_code)
print('Status: ', response.result.status)
print('Order ID: ', response.result.id)
print('Intent: ', response.result.intent)
print('Links:')
for link in response.result.links:
print('\t{}: {}\tCall Type: {}'.format(link.rel, link.href,
link.method))
            print('Total Amount: {} {}'.format(
                response.result.purchase_units[0].amount.currency_code,
                response.result.purchase_units[0].amount.value))
return response
def capture_order(self, token, debug=False):
request = OrdersCaptureRequest(token)
try:
response = self.client.execute(request)
order_id = response.result.id
return order_id
except IOError as ioe:
return 0
|
<mask token>
class PayPalClient:
def __init__(self):
self.client_id = settings.PAYPAL_CLIENT_ID
self.client_secret = settings.PAYPAL_SECRET
"""Set up and return PayPal Python SDK environment with PayPal access credentials.
This sample uses SandboxEnvironment. In production, use LiveEnvironment."""
self.environment = SandboxEnvironment(client_id=self.client_id,
client_secret=self.client_secret)
""" Returns PayPal HTTP client instance with environment that has access
credentials context. Use this instance to invoke PayPal APIs, provided the
credentials have access. """
self.client = PayPalHttpClient(self.environment)
def object_to_json(self, json_data):
"""
Function to print all json data in an organized readable manner
"""
result = {}
if sys.version_info[0] < 3:
itr = json_data.__dict__.iteritems()
else:
itr = json_data.__dict__.items()
for key, value in itr:
if key.startswith('__'):
continue
            result[key] = (self.array_to_json_array(value) if isinstance(value, list)
                           else self.object_to_json(value) if not self.is_primittive(value)
                           else value)
return result
def array_to_json_array(self, json_array):
result = []
if isinstance(json_array, list):
for item in json_array:
                result.append(self.object_to_json(item) if not self.is_primittive(item)
                              else self.array_to_json_array(item) if isinstance(item, list)
                              else item)
return result
<mask token>
class OrderClient(PayPalClient):
""" This is the sample function to create an order. It uses the
JSON body returned by buildRequestBody() to create an order."""
def create_order(self, order_body, debug=False):
request = OrdersCreateRequest()
request.prefer('return=representation')
request.request_body(order_body)
response = self.client.execute(request)
if debug:
print('Status Code: ', response.status_code)
print('Status: ', response.result.status)
print('Order ID: ', response.result.id)
print('Intent: ', response.result.intent)
print('Links:')
for link in response.result.links:
print('\t{}: {}\tCall Type: {}'.format(link.rel, link.href,
link.method))
            print('Total Amount: {} {}'.format(
                response.result.purchase_units[0].amount.currency_code,
                response.result.purchase_units[0].amount.value))
return response
def capture_order(self, token, debug=False):
request = OrdersCaptureRequest(token)
try:
response = self.client.execute(request)
order_id = response.result.id
return order_id
except IOError as ioe:
return 0
|
from paypalcheckoutsdk.core import PayPalHttpClient, SandboxEnvironment
from paypalcheckoutsdk.orders import OrdersCaptureRequest, OrdersCreateRequest
from django.conf import settings
import sys
class PayPalClient:
def __init__(self):
self.client_id = settings.PAYPAL_CLIENT_ID
self.client_secret = settings.PAYPAL_SECRET
"""Set up and return PayPal Python SDK environment with PayPal access credentials.
This sample uses SandboxEnvironment. In production, use LiveEnvironment."""
self.environment = SandboxEnvironment(client_id=self.client_id, client_secret=self.client_secret)
""" Returns PayPal HTTP client instance with environment that has access
credentials context. Use this instance to invoke PayPal APIs, provided the
credentials have access. """
self.client = PayPalHttpClient(self.environment)
def object_to_json(self, json_data):
"""
Function to print all json data in an organized readable manner
"""
result = {}
if sys.version_info[0] < 3:
itr = json_data.__dict__.iteritems()
else:
itr = json_data.__dict__.items()
for key,value in itr:
# Skip internal attributes.
if key.startswith("__"):
continue
result[key] = self.array_to_json_array(value) if isinstance(value, list) else\
self.object_to_json(value) if not self.is_primittive(value) else\
value
        return result
def array_to_json_array(self, json_array):
result =[]
if isinstance(json_array, list):
for item in json_array:
result.append(self.object_to_json(item) if not self.is_primittive(item) \
else self.array_to_json_array(item) if isinstance(item, list) else item)
return result
def is_primittive(self, data):
return isinstance(data, str) or isinstance(data, int)
class OrderClient(PayPalClient):
#2. Set up your server to receive a call from the client
""" This is the sample function to create an order. It uses the
JSON body returned by buildRequestBody() to create an order."""
def create_order(self, order_body, debug=False):
request = OrdersCreateRequest()
request.prefer('return=representation')
#3. Call PayPal to set up a transaction
request.request_body(order_body)
response = self.client.execute(request)
if debug:
print('Status Code: ', response.status_code)
print( 'Status: ', response.result.status)
print( 'Order ID: ', response.result.id)
print( 'Intent: ', response.result.intent)
print ('Links:')
for link in response.result.links:
print('\t{}: {}\tCall Type: {}'.format(link.rel, link.href, link.method))
print ('Total Amount: {} {}'.format(response.result.purchase_units[0].amount.currency_code,
response.result.purchase_units[0].amount.value))
return response
def capture_order(self, token, debug=False):
request = OrdersCaptureRequest(token)
try :
response = self.client.execute(request)
order_id = response.result.id
return order_id
except IOError as ioe:
return 0
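# Usage sketch (hedged): OrderClient().capture_order(token) returns the
# captured order id on success and 0 when the PayPal call raises IOError;
# create_order(body, debug=True) additionally prints the order's status,
# id, links and total amount.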
|
[
4,
5,
6,
8,
11
] |
2,129 |
d71ec86f68cc81c93a39f15c785c75c2a1023f14
|
<mask token>
|
<mask token>
def fetch_data(faultNumber, position):
df1 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +
'_Pos_' + str(position) + '%.csv')
df1.set_index(df1.columns[0])
df1 = df1.drop(columns=[df1.columns[0]])
df2 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +
'_Pos_' + str(position) + '%_LSTM-AE_Output.csv')
df2.set_index(df2.columns[0])
df2 = df2.drop(columns=[df2.columns[0]])
df1 = df1.join(df2['Loss_mae'])
df1 = df1.join(df2['Threshold'])
df1['pointType'] = df1.apply(lambda row: _label_point(row), axis=1)
df2.join(df1['pointType'])
return df1
<mask token>
|
<mask token>
def fetch_data(faultNumber, position):
df1 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +
'_Pos_' + str(position) + '%.csv')
df1.set_index(df1.columns[0])
df1 = df1.drop(columns=[df1.columns[0]])
df2 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +
'_Pos_' + str(position) + '%_LSTM-AE_Output.csv')
df2.set_index(df2.columns[0])
df2 = df2.drop(columns=[df2.columns[0]])
df1 = df1.join(df2['Loss_mae'])
df1 = df1.join(df2['Threshold'])
df1['pointType'] = df1.apply(lambda row: _label_point(row), axis=1)
df2.join(df1['pointType'])
return df1
def _label_point(row):
if np.isnan(row.Threshold):
return 'TR'
if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] != 0:
return 'TP'
if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] != 0:
return 'FN'
if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] == 0:
return 'FP'
if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] == 0:
return 'TN'
|
import numpy as np
import pandas as pd
def fetch_data(faultNumber, position):
df1 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +
'_Pos_' + str(position) + '%.csv')
df1.set_index(df1.columns[0])
df1 = df1.drop(columns=[df1.columns[0]])
df2 = pd.read_csv('./data/TEP_CaseStudy_Fault_' + str(faultNumber) +
'_Pos_' + str(position) + '%_LSTM-AE_Output.csv')
df2.set_index(df2.columns[0])
df2 = df2.drop(columns=[df2.columns[0]])
df1 = df1.join(df2['Loss_mae'])
df1 = df1.join(df2['Threshold'])
df1['pointType'] = df1.apply(lambda row: _label_point(row), axis=1)
df2.join(df1['pointType'])
return df1
def _label_point(row):
if np.isnan(row.Threshold):
return 'TR'
if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] != 0:
return 'TP'
if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] != 0:
return 'FN'
if row['Loss_mae'] >= row['Threshold'] and row['faultNumber'] == 0:
return 'FP'
if row['Loss_mae'] < row['Threshold'] and row['faultNumber'] == 0:
return 'TN'
|
import numpy as np
import pandas as pd
def fetch_data(faultNumber, position):
df1 = pd.read_csv("./data/TEP_CaseStudy_Fault_" + str(faultNumber) + "_Pos_" + str(position) + "%.csv")
df1.set_index(df1.columns[0])
df1 = df1.drop(columns=[df1.columns[0]])
df2 = pd.read_csv("./data/TEP_CaseStudy_Fault_" + str(faultNumber) + "_Pos_" + str(position) + "%_LSTM-AE_Output.csv")
df2.set_index(df2.columns[0])
df2 = df2.drop(columns=[df2.columns[0]])
df1 = df1.join(df2["Loss_mae"])
df1 = df1.join(df2["Threshold"])
df1["pointType"] = df1.apply(lambda row: _label_point(row), axis=1)
df2.join(df1["pointType"])
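    # Note: DataFrame.join returns a new frame, so the result of the line
    # above is discarded and df2 is left unchanged; only df1 is returned.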
return df1
def _label_point(row):
if np.isnan(row.Threshold):
return "TR"
if (row["Loss_mae"] >= row["Threshold"]) and (row["faultNumber"] != 0):
return "TP"
if (row["Loss_mae"] < row["Threshold"]) and (row["faultNumber"] != 0):
return "FN"
if (row["Loss_mae"] >= row["Threshold"]) and (row["faultNumber"] == 0):
return "FP"
if (row["Loss_mae"] < row["Threshold"]) and (row["faultNumber"] == 0):
return "TN"
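# Example (hedged): a row whose Loss_mae exceeds its Threshold while
# faultNumber != 0 is labelled "TP"; below threshold with faultNumber == 0
# it is "TN"; rows with a NaN Threshold fall in the training region "TR".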
|
[
0,
1,
2,
3,
4
] |
2,130 |
fcb13b087b9c967ab16b64885411cc4aae98583c
|
<mask token>
class InviteAdmin(admin.ModelAdmin):
<mask token>
<mask token>
|
<mask token>
class InviteAdmin(admin.ModelAdmin):
list_display = ('invitee', 'inviter', 'created_on', 'approved',
'rejected', 'used')
<mask token>
|
<mask token>
class InviteAdmin(admin.ModelAdmin):
list_display = ('invitee', 'inviter', 'created_on', 'approved',
'rejected', 'used')
admin.site.register(Invite, InviteAdmin)
|
from django.contrib import admin
from .models import Invite
class InviteAdmin(admin.ModelAdmin):
list_display = ('invitee', 'inviter', 'created_on', 'approved',
'rejected', 'used')
admin.site.register(Invite, InviteAdmin)
| null |
[
1,
2,
3,
4
] |
2,131 |
07dc058ecef323ffd41299245e4fcafdc9e41506
|
<mask token>
def resultados(request, total):
latest_question_list = Pregunta.objects.order_by('fecha')[:total]
output = ', '.join([q.descripcion for q in latest_question_list])
return HttpResponse(output)
<mask token>
|
<mask token>
def detalle(request, id_pregunta):
pregunta = Pregunta.objects.get(id=id_pregunta)
template = loader.get_template('polls/detalle.html')
context = {'pregunta': pregunta}
return HttpResponse(template.render(context, request))
def resultados(request, total):
latest_question_list = Pregunta.objects.order_by('fecha')[:total]
output = ', '.join([q.descripcion for q in latest_question_list])
return HttpResponse(output)
<mask token>
|
<mask token>
def index(request):
preguntas = Pregunta.objects.order_by('-fecha')[:5]
template = loader.get_template('polls/index.html')
context = {'listado': preguntas}
return HttpResponse(template.render(context, request))
def detalle(request, id_pregunta):
pregunta = Pregunta.objects.get(id=id_pregunta)
template = loader.get_template('polls/detalle.html')
context = {'pregunta': pregunta}
return HttpResponse(template.render(context, request))
def resultados(request, total):
latest_question_list = Pregunta.objects.order_by('fecha')[:total]
output = ', '.join([q.descripcion for q in latest_question_list])
return HttpResponse(output)
<mask token>
|
from django.http import HttpResponse
from polls.models import Pregunta
from django.template import loader
def index(request):
preguntas = Pregunta.objects.order_by('-fecha')[:5]
template = loader.get_template('polls/index.html')
context = {'listado': preguntas}
return HttpResponse(template.render(context, request))
def detalle(request, id_pregunta):
pregunta = Pregunta.objects.get(id=id_pregunta)
template = loader.get_template('polls/detalle.html')
context = {'pregunta': pregunta}
return HttpResponse(template.render(context, request))
def resultados(request, total):
latest_question_list = Pregunta.objects.order_by('fecha')[:total]
output = ', '.join([q.descripcion for q in latest_question_list])
return HttpResponse(output)
<mask token>
|
from django.http import HttpResponse
from polls.models import Pregunta
from django.template import loader
def index(request):
preguntas = Pregunta.objects.order_by('-fecha')[:5]
template = loader.get_template('polls/index.html')
context = { 'listado': preguntas,}
return HttpResponse(template.render(context, request))
def detalle(request, id_pregunta):
pregunta = Pregunta.objects.get(id=id_pregunta)
template = loader.get_template('polls/detalle.html')
context = { 'pregunta': pregunta }
return HttpResponse(template.render(context, request))
def resultados(request, total):
latest_question_list = Pregunta.objects.order_by('fecha')[:total]
output = ', '.join([q.descripcion for q in latest_question_list])
return HttpResponse(output)
"""
-Construir una vista que retorne todas las opciones asociadas a una pregunta
*FILTRAR POR ID DE PREGUNTA
"""
|
[
1,
2,
3,
4,
5
] |
2,132 |
8fe45332ce09195beabb24c8cbb56868c564ded4
|
<mask token>
|
<mask token>
def test(data):
actions.navigate(data.env.url + 'tabs/')
actions.send_keys('#title', 'lorem ipsum')
actions.click('#goButtonCustom')
actions.assert_amount_of_windows(2)
actions.close_window_by_partial_title('lorem')
golem_steps.assert_last_step_message(
"Close window by partial title 'lorem'")
actions.assert_amount_of_windows(1)
|
<mask token>
description = 'close_window_by_partial_title action'
def test(data):
actions.navigate(data.env.url + 'tabs/')
actions.send_keys('#title', 'lorem ipsum')
actions.click('#goButtonCustom')
actions.assert_amount_of_windows(2)
actions.close_window_by_partial_title('lorem')
golem_steps.assert_last_step_message(
"Close window by partial title 'lorem'")
actions.assert_amount_of_windows(1)
|
from golem import actions
from projects.golem_integration.pages import golem_steps
description = 'close_window_by_partial_title action'
def test(data):
actions.navigate(data.env.url + 'tabs/')
actions.send_keys('#title', 'lorem ipsum')
actions.click('#goButtonCustom')
actions.assert_amount_of_windows(2)
actions.close_window_by_partial_title('lorem')
golem_steps.assert_last_step_message(
"Close window by partial title 'lorem'")
actions.assert_amount_of_windows(1)
| null |
[
0,
1,
2,
3
] |
2,133 |
d45ca839a24093266c48e5f97164b160190b154d
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = [('django_otp', '0001_initial')]
operations = [migrations.AddField(model_name='otpsecrets', name=
'issuer_name', field=models.CharField(blank=True, db_index=True,
max_length=40))]
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('django_otp', '0001_initial')]
operations = [migrations.AddField(model_name='otpsecrets', name=
'issuer_name', field=models.CharField(blank=True, db_index=True,
max_length=40))]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-29 03:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('django_otp', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='otpsecrets',
name='issuer_name',
field=models.CharField(blank=True, db_index=True, max_length=40),
),
]
|
[
0,
1,
2,
3,
4
] |
2,134 |
b2b961c6ff1d975d80a84be361321ab44dc026a0
|
<mask token>
|
<mask token>
class QueueMaster(QueueSubscriberManager, QueuePublisherManager,
QueueLifecycleManager):
<mask token>
pass
|
<mask token>
class QueueMaster(QueueSubscriberManager, QueuePublisherManager,
QueueLifecycleManager):
"""
This class interfaces all types of queue objects that you might want.
"""
pass
|
from queuingservices.managers.queue_lifecycle_manager import QueueLifecycleManager
from queuingservices.managers.queue_publisher_manager import QueuePublisherManager
from queuingservices.managers.queue_subscriber_manager import QueueSubscriberManager
class QueueMaster(QueueSubscriberManager, QueuePublisherManager,
QueueLifecycleManager):
"""
This class interfaces all types of queue objects that you might want.
"""
pass
| null |
[
0,
1,
2,
3
] |
2,135 |
302634b93725ceb9333e236021cbb64e023ff798
|
<mask token>
|
<mask token>
if LDB_TOKEN == '':
raise Exception(
'Please configure your OpenLDBWS token in getDepartureBoardExample!')
<mask token>
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)
stdscr.addstr(height - 3, 6, 'Arrivals')
stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)
stdscr.addstr(height - 3, 19, 'Departures')
stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)
stdscr.addstr(height - 2, 6, 'Quit')
stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')
stdscr.addstr(1, 2, 'Train info powered by National Rail')
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
        stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
            stdscr.addstr(3, 2, "Departures from " + res2.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.
location[0].locationName, curses.color_pair(2) | curses
.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != 'On time':
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[
0].locationName, curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != 'On time':
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
|
<mask token>
LDB_TOKEN = 'NULLTOKEN'
WSDL = (
'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'
)
if LDB_TOKEN == '':
raise Exception(
'Please configure your OpenLDBWS token in getDepartureBoardExample!')
history = HistoryPlugin()
client = Client(wsdl=WSDL, plugins=[history])
header = xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken', xsd.
ComplexType([xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue', xsd.
String())]))
header_value = header(TokenValue=LDB_TOKEN)
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)
stdscr.addstr(height - 3, 6, 'Arrivals')
stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)
stdscr.addstr(height - 3, 19, 'Departures')
stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)
stdscr.addstr(height - 2, 6, 'Quit')
stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')
stdscr.addstr(1, 2, 'Train info powered by National Rail')
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
stdscr.refresh()
stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Departure's from " + res2.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.
location[0].locationName, curses.color_pair(2) | curses
.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != 'On time':
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[
0].locationName, curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != 'On time':
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
|
import curses
from zeep import Client
from zeep import xsd
from zeep.plugins import HistoryPlugin
import time
from datetime import datetime
import os
LDB_TOKEN = 'NULLTOKEN'
WSDL = (
'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'
)
if LDB_TOKEN == '':
raise Exception(
'Please configure your OpenLDBWS token in getDepartureBoardExample!')
history = HistoryPlugin()
client = Client(wsdl=WSDL, plugins=[history])
header = xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken', xsd.
ComplexType([xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue', xsd.
String())]))
header_value = header(TokenValue=LDB_TOKEN)
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, '[A]', curses.A_BOLD)
stdscr.addstr(height - 3, 6, 'Arrivals')
stdscr.addstr(height - 3, 15, '[D]', curses.A_BOLD)
stdscr.addstr(height - 3, 19, 'Departures')
stdscr.addstr(height - 2, 2, '[Q]', curses.A_BOLD)
stdscr.addstr(height - 2, 6, 'Quit')
stdscr.addstr(height - 2, width - 28, 'Version 1.0 By RaithSphere')
stdscr.addstr(1, 2, 'Train info powered by National Rail')
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
stdscr.refresh()
stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Departure's from " + res2.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Destination', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.
location[0].locationName, curses.color_pair(2) | curses
.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != 'On time':
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN',
_soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, 'Time', curses.A_BOLD)
stdscr.addstr(5, width - width + 15, 'Origin', curses.A_BOLD)
stdscr.addstr(5, width - 25, 'Plat', curses.A_BOLD)
stdscr.addstr(5, width - 15, 'Expected', curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = '?'
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[
0].locationName, curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != 'On time':
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
|
import curses
from zeep import Client
from zeep import xsd
from zeep.plugins import HistoryPlugin
import time
from datetime import datetime
import os
LDB_TOKEN = 'NULLTOKEN'
WSDL = 'http://lite.realtime.nationalrail.co.uk/OpenLDBWS/wsdl.aspx?ver=2017-10-01'
if LDB_TOKEN == '':
raise Exception("Please configure your OpenLDBWS token in getDepartureBoardExample!")
history = HistoryPlugin()
client = Client(wsdl=WSDL, plugins=[history])
header = xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}AccessToken',
xsd.ComplexType([
xsd.Element(
'{http://thalesgroup.com/RTTI/2013-11-28/Token/types}TokenValue',
xsd.String()),
])
)
header_value = header(TokenValue=LDB_TOKEN)
def main(stdscr):
res = client.service.GetDepartureBoard(numRows=10, crs='NAN', _soapheaders=[header_value])
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.erase()
while True:
height, width = stdscr.getmaxyx()
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.border(0)
stdscr.hline(height - 4, 1, curses.ACS_BSBS, width - 2)
stdscr.addstr(height - 3, 2, "[A]", curses.A_BOLD)
stdscr.addstr(height - 3, 6, "Arrivals")
stdscr.addstr(height - 3, 15, "[D]", curses.A_BOLD)
stdscr.addstr(height - 3, 19, "Departures")
stdscr.addstr(height - 2, 2, "[Q]", curses.A_BOLD)
stdscr.addstr(height - 2, 6, "Quit")
stdscr.addstr(height - 2, width - 28, "Version 1.0 By RaithSphere")
stdscr.addstr(1, 2, "Train info powered by National Rail")
stdscr.addstr(1, width - 10, datetime.now().strftime('%H:%M:%S'))
stdscr.hline(2, 1, curses.ACS_BSBS, width - 2)
stdscr.refresh()
stdscr.refresh()
key = stdscr.getch()
if key == ord('q'):
break
elif key == ord('d'):
res2 = client.service.GetDepartureBoard(numRows=10, crs='NAN', _soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Departure's from " + res2.locationName)
stdscr.addstr(5, width - width + 5, "Time", curses.A_BOLD)
stdscr.addstr(5, width - width + 15, "Destination", curses.A_BOLD)
stdscr.addstr(5, width - 25, "Plat", curses.A_BOLD)
stdscr.addstr(5, width - 15, "Expected", curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res2.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = "?"
stdscr.addstr(7 + i, width - width + 5, t.std)
stdscr.addstr(7 + i, width - width + 15, t.destination.location[0].locationName,
curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.etd != "On time":
stdscr.addstr(7 + i, width - 15, t.etd, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.etd)
i += 1
elif key == ord('a'):
res3 = client.service.GetArrivalBoard(numRows=10, crs='NAN', _soapheaders=[header_value])
stdscr.erase()
stdscr.border(0)
stdscr.addstr(3, 2, "Arrivals's at " + res3.locationName)
stdscr.addstr(5, width - width + 5, "Time", curses.A_BOLD)
stdscr.addstr(5, width - width + 15, "Origin", curses.A_BOLD)
stdscr.addstr(5, width - 25, "Plat", curses.A_BOLD)
stdscr.addstr(5, width - 15, "Expected", curses.A_BOLD)
stdscr.hline(6, width - width + 5, curses.ACS_BSBS, 4)
stdscr.hline(6, width - width + 15, curses.ACS_BSBS, 11)
stdscr.hline(6, width - 25, curses.ACS_BSBS, 4)
stdscr.hline(6, width - 15, curses.ACS_BSBS, 8)
services = res3.trainServices.service
i = 0
while i < len(services):
t = services[i]
if not t.platform:
t.platform = "?"
stdscr.addstr(7 + i, width - width + 5, t.sta)
stdscr.addstr(7 + i, width - width + 15, t.origin.location[0].locationName,
curses.color_pair(2) | curses.A_BOLD)
stdscr.addstr(7 + i, width - 25, t.platform)
if t.eta != "On time":
stdscr.addstr(7 + i, width - 15, t.eta, curses.A_STANDOUT)
else:
stdscr.addstr(7 + i, width - 15, t.eta)
i += 1
stdscr.refresh()
curses.wrapper(main)
|
[
0,
2,
3,
4,
5
] |
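The record above drives a live departure board with zeep SOAP calls inside a curses loop; a minimal, network-free sketch of just the curses refresh/keypress skeleton it uses (placeholder text instead of rail data):
import curses
from datetime import datetime

def board(stdscr):
    curses.curs_set(0)
    while True:
        stdscr.erase()
        stdscr.border(0)
        stdscr.addstr(1, 2, datetime.now().strftime('%H:%M:%S'))
        stdscr.addstr(3, 2, 'q: quit   d: departures   a: arrivals')
        stdscr.refresh()
        if stdscr.getch() == ord('q'):
            break

if __name__ == '__main__':
    curses.wrapper(board)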
2,136 |
3314ffdbc2f10170176c590aebf49c416bcc8856
|
import os
import mysql.connector
import time
from flask import Flask, render_template, request
app = Flask(__name__)
def dbconnect():
    return mysql.connector.connect(user='<user>', password='<password>',  # credentials were elided in the original; placeholders keep the call syntactically valid
                                   host="mysqlshereen.mysql.database.azure.com", port=3306, database='test')
@app.route('/result', methods=['POST', 'GET'])
def query():
start_time = time.time()
display = []
conn=dbconnect()
curr=conn.cursor()
curr.execute("""
UPDATE TABLE SET columnName = null WHERE YourCondition
delete from FOOD where DIGITS >900;""")
sql=curr.fetchall()
for row in sql:
tuple = (row[0], row[1], row[3])
display.append(tuple)
end_time = time.time()
total_time = end_time - start_time
print("final time:", total_time)
display.append(total_time)
curr.close()
conn.close()
return render_template('display.html', display=display)
@app.route('/download', methods=['POST', 'GET'])
def download():
list = []
if request.method == 'POST':
mytext = request.form['text1']
mytext1 = request.form['text2']
conn = dbconnect()
curr = conn.cursor()
r1=int(mytext)
r2 = int(mytext1)
        # parameterized range query; the original concatenated ints into the SQL string
        curr.execute('select DIGITS, CATEGORY from FOOD where DIGITS > %s and DIGITS < %s', (r1, r2))
sql = curr.fetchall()
#curr.execute('select PICTURE from FOOD')
data = curr.fetchone()[0]
for row in data:
with open('/home/shereen/quiz8/static/'+name+'.jpg','w') as local_file:
local_file.write(data)
list.append(data)
#img_name = name+'.jpg'
curr.close()
conn.close()
#return img_name
return render_template('result.html',list=list,)
def insert():
conn = dbconnect()
curr = conn.cursor()
path = '/home/shereen/quiz8/data/'
for root, dirs, files in os.walk('/home/shereen/quiz8/data/'):
for file in files:
img_file = file.replace('csv', 'jpg')
print(img_file)
if file.endswith(".csv"):
with open(path + file) as f:
name = file[:-4]
lines = f.readlines()
line1 = lines[0].replace('\r', '')
line2 = lines[1].replace('\r', '')
line3 = lines[2].replace('\r', '')
with open('/home/shereen/quiz8/data/' + img_file, 'rb') as img:
image = img.read()
sql = 'insert into FOOD (NAME,ingred,digits,category,picture) values (%s,%s,%s,%s,%s)'
args = (name,line2, line1, line3, image)
curr.execute(sql, args)
conn.commit()
def dbcount():
print('hi')
conn = dbconnect()
cur = conn.cursor()
start_time = time.time()
conn = dbconnect()
cur = conn.cursor()
quer = 'select count(*) from FOOD'
cur.execute(quer)
res = cur.fetchone()
print(res[0])
conn.commit()
cur.close()
conn.close()
end_time = time.time()
tot = end_time - start_time
cur.close()
conn.close()
return res
@app.route('/')
def hello_world():
insert()
#query()
img_name = download()
#return render_template('result.html', img_name=img_name)
return render_template('main.html')
if __name__ == '__main__':
app.run()
| null | null | null | null |
[
0
] |
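insert() in the record above stores images as BLOBs with parameterized executes; a hedged standalone sketch of that round trip (the FOOD table and two of its columns are reused from above, the connection is assumed, and other columns are assumed nullable):
import mysql.connector

def save_and_load(conn, name, image_bytes):
    cur = conn.cursor()
    cur.execute('insert into FOOD (NAME, PICTURE) values (%s, %s)',
                (name, image_bytes))
    conn.commit()
    cur.execute('select PICTURE from FOOD where NAME = %s', (name,))
    blob = cur.fetchone()[0]
    cur.close()
    return blob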
2,137 |
c632c50028fee2f19fb65458f0b55ec228b8006f
|
<mask token>
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
<mask token>
|
<mask token>
def recursiveUnioniser(set):
if isinstance(set[0], int):
return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else:
return res
<mask token>
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
<mask token>
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
<mask token>
def repetitionAudit(set):
pass
|
<mask token>
def recursiveUnioniser(set):
if isinstance(set[0], int):
return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else:
return res
print(recursiveUnioniser(trial))
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
print(mutualexclusion(trial, trial2))
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
print(intersection(trial, trial2))
def repetitionAudit(set):
pass
|
trial = [1, 2, 3]
trial2 = [3, 4, 5]
def recursiveUnioniser(set):
if isinstance(set[0], int):
return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else:
return res
print(recursiveUnioniser(trial))
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
print(mutualexclusion(trial, trial2))
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
print(intersection(trial, trial2))
def repetitionAudit(set):
pass
|
#This is a module which implements Naive Set Theory in Python.
#It will be useful for Unions, Intersections, Mutual Exclusion, and more.
#ideas: print(sum([[[1],[2]], [[3],[4]], [[5],[6]]], [])) Monoid - abstraction on +
trial = [1, 2, 3]
trial2 = [3, 4, 5]
def recursiveUnioniser(set):
if isinstance(set[0], int): return set
res = []
for i in range(len(set)):
for j in range(len(set[i])):
res.append(set[i][j])
if isinstance(res[0], list):
return recursiveUnioniser(res)
else: return res
print(recursiveUnioniser(trial))
def mutualexclusion(set_a, set_b):
res = [i for i in set_a if i not in set_b]
res2 = [i for i in set_b if i not in set_a]
res += res2
return res
print(mutualexclusion(trial, trial2))
def intersection(set_a, set_b):
res = [i for i in set_a if i in set_b]
return res
print(intersection(trial, trial2))
def repetitionAudit(set):
pass #this will audit a list to see if an element occurs more than once
#If it does, it will remove this element and return the list
|
[
1,
4,
5,
6,
7
] |
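Python's built-in set type already provides the operations implemented in this record; a quick cross-check against the functions above:
a, b = [1, 2, 3], [3, 4, 5]
# symmetric difference matches mutualexclusion(a, b)
assert sorted(set(a) ^ set(b)) == [1, 2, 4, 5]
# & matches intersection(a, b)
assert sorted(set(a) & set(b)) == [3]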
2,138 |
17ac827d181650cd8bd6e75ca7ff363d70d3c4a7
|
import collections
import cPickle as pickle
import os
import shutil
import warnings
import numpy as np
import theano
import theano.tensor as T
import tables
#theano.config.compute_test_value = 'warn'
class SGD_Trainer(object):
"""Implementation of a stochastic gradient descent trainer
"""
#{{{ Properties
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, val):
#FIXME: make this work for other input types
if not isinstance(val, np.ndarray):
raise TypeError('Resetting trainer inputs currently only works for '
'ndarray inputs!')
self._inputs = val
self._inputs_theano = theano.shared(
self._inputs[:self._loadsize],
name='inputs')
self._numcases = self._inputs.shape[0]
self._numloads = self._numcases // self._loadsize
print 'recompiling trainer functions...'
self._compile_functions()
@property
def gradient_clip_threshold(self):
return self._gradient_clip_threshold.get_value()
@property
def learningrate_decay_factor(self):
return self._learningrate_decay_factor.get_value()
@learningrate_decay_factor.setter
def learningrate_decay_factor(self, val):
self._learningrate_decay_factor.set_value(np.float32(val))
@property
def learningrate_decay_interval(self):
return self._learningrate_decay_interval.get_value()
@learningrate_decay_interval.setter
def learningrate_decay_interval(self, val):
self._learningrate_decay_interval.set_value(np.int64(val))
@gradient_clip_threshold.setter
def gradient_clip_threshold(self, val):
self._gradient_clip_threshold.set_value(np.float32(val))
@property
def learningrate(self):
return self._learningrate.get_value()
@learningrate.setter
def learningrate(self, value):
self._learningrate.set_value(np.float32(value))
@property
def momentum(self):
return self._momentum.get_value()
@momentum.setter
def momentum(self, val):
self._momentum.set_value(np.float32(val))
@property
def batchsize(self):
return self._batchsize
@property
def loadsize(self):
return self._loadsize
@property
def numcases(self):
return self._numcases
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, val):
self._verbose = bool(val)
@property
def epochcount(self):
return self._epochcount
@epochcount.setter
def epochcount(self, val):
self._epochcount = int(val)
@property
def momentum_batchcounter(self):
return self._momentum_batchcounter
#}}}
def __init__(self, model=None, inputs=None, batchsize=100, learningrate=.01,
momentum=0.9, loadsize=None,
rng=None, verbose=True,
numcases=None, gradient_clip_threshold=1000,
numepochs_per_load=1,
rmsprop=None, cost=None, params=None, inputvar=None,
grads=None):
#{{{ Initialization of Properties
assert model is not None or (
cost is not None and params is not None and
inputvar is not None and grads is not None), (
"either a model instance or cost, params and inputvar "
"have to be passed to the SGD_Trainer constructor")
if model is not None:
self._model = model
self._params = model.params
self._cost = model._cost
self._inputvar = model.inputs
self._grads = model._grads
else:
self._params = params
self._cost = cost
self._inputvar = inputvar
self._grads = grads
self._learningrate = theano.shared(np.float32(learningrate),
name='learningrate')
self.numepochs_per_load = numepochs_per_load
self._momentum = theano.shared(np.float32(momentum),
name='momentum')
self._total_stepcount = 0
self._gradient_clip_threshold = theano.shared(
np.float32(gradient_clip_threshold),
name='gradient_clip_threshold')
self._avg_gradnorm = theano.shared(np.float32(0.), name='avg_gradnorm')
self._learningrate_decay_factor = theano.shared(
np.float32,
name='learningrate_decay_factor')
self._learningrate_decay_interval = theano.shared(
np.int64,
name='learningrate_decay_interval')
if isinstance(inputs, str):
self._inputs_type = 'h5'
self._inputsfile = tables.openFile(inputs, 'r')
self._inputs = self._inputsfile.root.inputs_white
elif hasattr(inputs, '__call__'):
self._inputs_type = 'function'
self._inputs_fn = inputs
else:
self._inputs_type = 'numpy'
self._inputs = inputs
self._model = model
self._numparams = reduce(lambda x,y: x+y,
[p.get_value().size for p in self._params])
if self._inputs_type == 'function':
numcases = loadsize
else:
if numcases is None or numcases > self._inputs.shape[0]:
numcases = self._inputs.shape[0]
self._numcases = numcases
self._batchsize = batchsize
self._loadsize = loadsize
self._verbose = verbose
if self._batchsize > self._numcases:
self._batchsize = self._numcases
if self._loadsize == None:
self._loadsize = self._batchsize * 100
if self._loadsize > self._numcases:
self._loadsize = self._numcases
self._numloads = self._numcases // self._loadsize
self._numbatches = self._loadsize // self._batchsize
if self._inputs_type == 'h5':
self._inputs_theano = theano.shared(
self._inputs.read(stop=self._loadsize))
elif self._inputs_type == 'function':
# TODO: generate inputs for first load
print "generating first load..."
inp = np.empty((self._loadsize, ) + (self._inputs_fn().shape),
dtype=np.float32)
for i in xrange(self._loadsize):
inp[i] = self._inputs_fn()
if (i + 1) % 100 == 0:
print '{0}/{1}'.format(i + 1, self.loadsize)
self._inputs_theano = theano.shared(
inp)
else:
self._inputs_theano = theano.shared(
self._inputs[:self._loadsize],
name='inputs')
#self._inputs_theano.tag.test_value = np.random.randn(100, model.n_vis*4)
self._momentum_batchcounter = 0
if rng is None:
self._rng = np.random.RandomState(1)
else:
self._rng = rng
self._epochcount = 0
self._index = T.lscalar()
self._incs = \
dict([(p, theano.shared(value=np.zeros(p.get_value().shape,
dtype=theano.config.floatX), name='inc_'+p.name))
for p in self._params])
self._inc_updates = collections.OrderedDict()
self.rmsprop = rmsprop
if self.rmsprop is not None:
self.averaging_coeff=0.95
self.stabilizer=1e-2
self._avg_grad_sqrs = \
dict([(p, theano.shared(value=np.zeros(p.get_value().shape,
dtype=theano.config.floatX), name='avg_grad_sqr_'+p.name))
for p in self._params])
self._avg_grad_sqrs_updates = collections.OrderedDict()
self._updates_nomomentum = collections.OrderedDict()
self._updates = collections.OrderedDict()
self._n = T.lscalar('n')
self._n.tag.test_value = 0.
self._noop = 0.0 * self._n
self._batch_idx = theano.shared(
value=np.array(0, dtype=np.int64), name='batch_idx')
self.costs = []
self._compile_functions()
#}}}
def __del__(self):
if self._inputs_type == 'h5':
self._inputsfile.close()
def save(self, filename):
"""Saves the trainers parameters to a file
Params:
filename: path to the file
"""
ext = os.path.splitext(filename)[1]
if ext == '.pkl':
print 'saving trainer params to a pkl file'
self.save_pkl(filename)
else:
print 'saving trainer params to a hdf5 file'
self.save_h5(filename)
def save_h5(self, filename):
"""Saves a HDF5 file containing the trainers parameters
Params:
filename: path to the file
"""
try:
shutil.copyfile(filename, '{0}_bak'.format(filename))
except IOError:
print 'could not make backup of trainer param file (which is \
normal if we haven\'t saved one until now)'
paramfile = tables.openFile(filename, 'w')
paramfile.createArray(paramfile.root, 'learningrate',
self.learningrate)
paramfile.createArray(paramfile.root, 'verbose', self.verbose)
paramfile.createArray(paramfile.root, 'loadsize', self.loadsize)
paramfile.createArray(paramfile.root, 'batchsize', self.batchsize)
paramfile.createArray(paramfile.root, 'momentum',
self.momentum)
paramfile.createArray(paramfile.root, 'epochcount',
self.epochcount)
paramfile.createArray(paramfile.root, 'momentum_batchcounter',
self.momentum_batchcounter)
incsgrp = paramfile.createGroup(paramfile.root, 'incs', 'increments')
for p in self._params:
paramfile.createArray(incsgrp, p.name, self._incs[p].get_value())
if self.rmsprop is not None:
avg_grad_sqrs_grp = paramfile.createGroup(paramfile.root, 'avg_grad_sqrs')
for p in self._params:
paramfile.createArray(avg_grad_sqrs_grp, p.name, self._avg_grad_sqrs[p].get_value())
paramfile.close()
def save_pkl(self, filename):
"""Saves a pickled dictionary containing the parameters to a file
Params:
filename: path to the file
"""
param_dict = {}
param_dict['learningrate'] = self.learningrate
param_dict['verbose'] = self.verbose
param_dict['loadsize'] = self.loadsize
param_dict['batchsize'] = self.batchsize
param_dict['momentum'] = self.momentum
param_dict['epochcount'] = self.epochcount
param_dict['momentum_batchcounter'] = self.momentum_batchcounter
param_dict['incs'] = dict(
[(p.name, self._incs[p].get_value()) for p in self._params])
if self.rmsprop is not None:
param_dict['avg_grad_sqrs'] = dict(
[(p.name, self._avg_grad_sqrs[p].get_value()) for p in self._params])
pickle.dump(param_dict, open(filename, 'wb'))
def load(self, filename):
"""Loads pickled dictionary containing parameters from a file
Params:
filename: path to the file
"""
param_dict = pickle.load(open('%s' % filename, 'rb'))
self.learningrate = param_dict['learningrate']
self.verbose = param_dict['verbose']
self._loadsize = param_dict['loadsize']
self._batchsize = param_dict['batchsize']
self.momentum = param_dict['momentum']
self.epochcount = param_dict['epochcount']
self._momentum_batchcounter = param_dict['momentum_batchcounter']
for param_name in param_dict['incs'].keys():
for p in self._params:
if p.name == param_name:
self._incs[p].set_value(param_dict['incs'][param_name])
if self.rmsprop is not None:
for param_name in param_dict['avg_grad_sqrs'].keys():
for p in self._params:
if p.name == param_name:
self._avg_grad_sqrs[p].set_value(param_dict['avg_grad_sqrs'][param_name])
self._numbatches = self._loadsize // self._batchsize
if self._inputs_type != 'function':
self._numloads = self._inputs.shape[0] // self._loadsize
if self._inputs_type == 'h5':
self._inputs_theano.set_value(
self._inputs.read(stop=self._loadsize))
else:
self._inputs_theano.set_value(self._inputs[:self._loadsize])
def reset_incs(self):
for p in self._params:
self._incs[p].set_value(
np.zeros(p.get_value().shape, dtype=theano.config.floatX))
def reset_avg_grad_sqrs(self):
for p in self._params:
self._avg_grad_sqrs[p].set_value(
np.zeros(p.get_value().shape, dtype=theano.config.floatX))
def _compile_functions(self):
self._gradnorm = T.zeros([])
for _param, _grad in zip(self._params, self._grads):
# apply rmsprop to before clipping gradients
if self.rmsprop is not None:
avg_grad_sqr = self._avg_grad_sqrs[_param]
new_avg_grad_sqr = self.averaging_coeff * avg_grad_sqr + \
(1 - self.averaging_coeff) * T.sqr(_grad)
self._avg_grad_sqrs_updates[avg_grad_sqr] = new_avg_grad_sqr
rms_grad_t = T.sqrt(new_avg_grad_sqr)
rms_grad_t = T.maximum(rms_grad_t, self.stabilizer)
_grad = _grad / rms_grad_t
self._gradnorm += T.sum(_grad**2) # calculated on the rmsprop 'grad'
self._gradnorm = T.sqrt(self._gradnorm)
self.gradnorm = theano.function(
inputs=[],
outputs=self._gradnorm,
givens={
self._inputvar:
self._inputs_theano[
self._batch_idx*self.batchsize:
(self._batch_idx+1)*self.batchsize]})
avg_gradnorm_update = {
self._avg_gradnorm: self._avg_gradnorm * .8 + self._gradnorm * .2}
for _param, _grad in zip(self._params, self._grads):
if hasattr(self._model, 'skip_params'):
if _param.name in self._model.skip_params:
continue
_clip_grad = T.switch(
T.gt(self._gradnorm, self._gradient_clip_threshold),
_grad * self._gradient_clip_threshold / self._gradnorm, _grad)
try: # ... to apply learningrate_modifiers
# Cliphid version:
self._inc_updates[self._incs[_param]] = \
self._momentum * self._incs[_param] - \
self._learningrate * \
self._model.layer.learningrate_modifiers[
_param.name] * _clip_grad
self._updates[_param] = _param + self._incs[_param]
self._updates_nomomentum[_param] = _param - \
self._learningrate * \
self._model.layer.learningrate_modifiers[_param.name] * \
_clip_grad
except AttributeError:
self._inc_updates[self._incs[_param]] = self._momentum * \
self._incs[_param] - self._learningrate * _clip_grad
self._updates[_param] = _param + self._incs[_param]
self._updates_nomomentum[_param] = _param - \
self._learningrate * _clip_grad
# first update gradient norm running avg
ordered_updates = collections.OrderedDict(avg_gradnorm_update)
# so that it is considered in the parameter update computations
ordered_updates.update(self._inc_updates)
self._updateincs = theano.function(
[], [self._cost, self._avg_gradnorm], updates = ordered_updates,
givens = {self._inputvar:self._inputs_theano[
self._batch_idx*self._batchsize:(self._batch_idx+1)* \
self._batchsize]})
self._trainmodel = theano.function(
[self._n], self._noop, updates = self._updates)
self._trainmodel_nomomentum = theano.function(
[self._n], self._noop, updates = self._updates_nomomentum,
givens = {self._inputvar:self._inputs_theano[
self._batch_idx*self._batchsize:(self._batch_idx+1)* \
self._batchsize]})
self._momentum_batchcounter = 0
def _trainsubstep(self, batchidx):
self._batch_idx.set_value(batchidx)
stepcost, avg_gradnorm = self._updateincs()
# catch NaN, before updating params
if np.isnan(stepcost):
raise ValueError, 'Cost function returned nan!'
elif np.isinf(stepcost):
raise ValueError, 'Cost function returned infinity!'
if self._momentum_batchcounter < 10:
self._momentum_batchcounter += 1
self._trainmodel_nomomentum(0)
else:
self._momentum_batchcounter = 10
self._trainmodel(0)
return stepcost, avg_gradnorm
def get_avg_gradnorm(self):
avg_gradnorm = 0.0
print self.gradnorm()
for batch_idx in range(self._numbatches):
self._batch_idx.set_value(batch_idx)
tmp = self.gradnorm()
avg_gradnorm += tmp / self._numbatches
print avg_gradnorm
return avg_gradnorm
def step(self):
total_cost = 0.0
cost = 0.0
stepcount = 0.0
self._epochcount += 1
for load_index in range(self._numloads):
indices = np.random.permutation(self._loadsize)
if self._inputs_type == 'h5':
self._inputs_theano.set_value(
self._inputs.read(
start=load_index * self._loadsize,
stop=(load_index + 1) * self._loadsize)[indices])
elif self._inputs_type == 'function':
# if load has been used n times, gen new load
if self._epochcount % self.numepochs_per_load == 0:
print 'using data function to generate new load...'
inp = np.empty((self._loadsize, ) + (self._inputs_fn().shape),
dtype=np.float32)
for i in xrange(self._loadsize):
inp[i] = self._inputs_fn()
if (i + 1) % 100 == 0:
print '{0}/{1}'.format(i + 1, self.loadsize)
self._inputs_theano.set_value(inp)
print 'done'
else:
self._inputs_theano.set_value(
self._inputs[load_index * self._loadsize + indices])
for batch_index in self._rng.permutation(self._numbatches):
stepcount += 1.0
self._total_stepcount += 1.0
stepcost, avg_gradnorm = self._trainsubstep(batch_index)
cost = (1.0-1.0/stepcount)*cost + (1.0/stepcount)* \
stepcost
if self._verbose:
print '> epoch {0:d}, load {1:d}/{2:d}, cost: {3:f}, avg. gradnorm: {4}'.format(
self._epochcount, load_index + 1, self._numloads, cost, avg_gradnorm)
if hasattr(self._model, 'monitor'):
self._model.monitor()
self.costs.append(cost)
return cost
| null | null | null | null |
[
0
] |
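The trainer in this record scales gradients by an RMSProp running average and then clips by global norm before the SGD update; a NumPy sketch of that arithmetic for a single parameter (momentum omitted; coefficients mirror the defaults above):
import numpy as np

def rmsprop_clip_step(param, grad, avg_grad_sqr, lr=0.01,
                      averaging_coeff=0.95, stabilizer=1e-2, clip=1000.0):
    # running average of squared gradients, as in _compile_functions above
    avg_grad_sqr = averaging_coeff * avg_grad_sqr + (1 - averaging_coeff) * grad ** 2
    scaled = grad / np.maximum(np.sqrt(avg_grad_sqr), stabilizer)
    gradnorm = np.sqrt(np.sum(scaled ** 2))
    if gradnorm > clip:  # global-norm clipping
        scaled = scaled * clip / gradnorm
    return param - lr * scaled, avg_grad_sqr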
2,139 |
f5b18673dd5a3ba3070c07e88ae83a531669311a
|
<mask token>
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
|
<mask token>
def test_create_engine():
eng = create_engine('cql://user:password@localhost:49154/system')
assert eng.execute('select * from system.schema_keyspaces')
def test_table_names():
eng = create_engine('cql://user:password@localhost:49154/system')
eng.table_names()
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
|
<mask token>
metadata = MetaData()
users = Table('users', metadata, Column('id', Integer, primary_key=True),
Column('name', String), Column('fullname', String))
def test_create_engine():
eng = create_engine('cql://user:password@localhost:49154/system')
assert eng.execute('select * from system.schema_keyspaces')
def test_table_names():
eng = create_engine('cql://user:password@localhost:49154/system')
eng.table_names()
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
|
<mask token>
import pytest
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
metadata = MetaData()
users = Table('users', metadata, Column('id', Integer, primary_key=True),
Column('name', String), Column('fullname', String))
def test_create_engine():
eng = create_engine('cql://user:password@localhost:49154/system')
assert eng.execute('select * from system.schema_keyspaces')
def test_table_names():
eng = create_engine('cql://user:password@localhost:49154/system')
eng.table_names()
def test_create_all():
eng = create_engine('cql://user:password@localhost:49154/system')
metadata.create_all(eng)
|
"""
Tests for `sqlalchemy-cql` module.
"""
import pytest
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
metadata = MetaData()
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('fullname', String),
)
def test_create_engine():
eng = create_engine("cql://user:password@localhost:49154/system")
assert eng.execute("select * from system.schema_keyspaces")
def test_table_names():
eng = create_engine("cql://user:password@localhost:49154/system")
eng.table_names()
def test_create_all():
eng = create_engine("cql://user:password@localhost:49154/system")
metadata.create_all(eng)
|
[
1,
3,
4,
5,
6
] |
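The three tests in this record repeat the same engine URL; a pytest fixture (a hypothetical refactor, same URL as above) would factor it out:
import pytest
from sqlalchemy import create_engine

@pytest.fixture
def eng():
    return create_engine('cql://user:password@localhost:49154/system')

def test_keyspaces(eng):
    assert eng.execute('select * from system.schema_keyspaces')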
2,140 |
eb890c68885cbab032ce9d6f3be3fd7013a2788b
|
<mask token>
|
<mask token>
os.chdir(main_dir)
<mask token>
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
<mask token>
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
|
<mask token>
main_dir = (
'C:\\Users\\Username\\Desktop\\Python\\End-to-End-Data-Analysis\\1. Get the Data\\table'
)
file = 'CMBS Table.csv'
os.chdir(main_dir)
cmbs = pd.read_csv(file, encoding='ISO-8859-1')
loan_seller_cols = [val for val in cmbs.columns.values if re.search(
'(^Loan\\s#|^Seller|^Property\\sName)', val)][3:]
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
regex_dict = {'_\\d': '', '\\(.+\\)+': '', '#': '', '%': '', '\\/': '',
'\\s\\s+': ' ', '^\\s+': '', '\\s+$': ''}
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
|
import pandas as pd
import os
import re
main_dir = (
'C:\\Users\\Username\\Desktop\\Python\\End-to-End-Data-Analysis\\1. Get the Data\\table'
)
file = 'CMBS Table.csv'
os.chdir(main_dir)
cmbs = pd.read_csv(file, encoding='ISO-8859-1')
loan_seller_cols = [val for val in cmbs.columns.values if re.search(
'(^Loan\\s#|^Seller|^Property\\sName)', val)][3:]
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
regex_dict = {'_\\d': '', '\\(.+\\)+': '', '#': '', '%': '', '\\/': '',
'\\s\\s+': ' ', '^\\s+': '', '\\s+$': ''}
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
|
import pandas as pd
import os
import re
main_dir = r'C:\Users\Username\Desktop\Python\End-to-End-Data-Analysis\1. Get the Data\table'
file = 'CMBS Table.csv'
os.chdir(main_dir)
cmbs = pd.read_csv(file, encoding='ISO-8859-1')
# Delete extra Loan & Seller columns
loan_seller_cols = [val for val in cmbs.columns.values if re.search('(^Loan\s#|^Seller|^Property\sName)', val)][3:]
for col in loan_seller_cols:
cmbs.drop(columns=col, axis=1, inplace=True)
# Regex to edit headers
regex_dict = {'_\d': '', '\(.+\)+': '', '#': '', '%': '', r'\/' : '', '\s\s+': ' ', '^\s+': '', '\s+$': ''}
for key, value in regex_dict.items():
cmbs.columns = [re.sub(key, value, col) for col in cmbs.columns]
# Delete
for col in list(cmbs.columns.values):
try:
if cmbs[col].str.normalize('NFKD').str.match(' ').all():
cmbs.drop(columns=col, axis=1, inplace=True)
except AttributeError:
continue
cmbs.to_csv('CMBS Final.csv', index=False, encoding='ISO-8859-1')
|
[
0,
1,
2,
3,
4
] |
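The header-cleanup loop in this record can be condensed into one reusable function with the same regex table (a sketch; it relies on dict insertion order, so Python 3.7+):
import re

REGEX_DICT = {r'_\d': '', r'\(.+\)+': '', '#': '', '%': '', r'\/': '',
              r'\s\s+': ' ', r'^\s+': '', r'\s+$': ''}

def clean_headers(columns):
    cleaned = list(columns)
    for pattern, repl in REGEX_DICT.items():
        cleaned = [re.sub(pattern, repl, col) for col in cleaned]
    return cleaned

assert clean_headers(['Loan_1 (x) ', ' Cut-off %']) == ['Loan', 'Cut-off']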
2,141 |
0e03a3b3401075384e580bc2bb8af1a106f1d238
|
<mask token>
class AuditMiddleware(object):
<mask token>
<mask token>
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
<mask token>
|
<mask token>
class AuditMiddleware(object):
<mask token>
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
request._handler_func = partial(self.pre_action_handler,
update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(
settings.DISPATCH_UID, request))
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
def pre_action_handler(self, sender, model_instance, audit_meta,
update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit'
) and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
|
<mask token>
class AuditMiddleware(object):
"""
middleware to add the user from requests to ModelChange objects.
This is independent of request logging and can be used separately.
"""
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
request._handler_func = partial(self.pre_action_handler,
update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(
settings.DISPATCH_UID, request))
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
def pre_action_handler(self, sender, model_instance, audit_meta,
update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit'
) and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
|
from __future__ import unicode_literals
from functools import partial
from django.contrib.auth import get_user_model
from .default_settings import settings
from . import signals
class AuditMiddleware(object):
"""
middleware to add the user from requests to ModelChange objects.
This is independent of request logging and can be used separately.
"""
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
request._handler_func = partial(self.pre_action_handler,
update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(
settings.DISPATCH_UID, request))
def process_response(self, request, response):
signals.audit_presave.disconnect(dispatch_uid=(settings.
DISPATCH_UID, request))
return response
def pre_action_handler(self, sender, model_instance, audit_meta,
update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit'
) and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
|
from __future__ import unicode_literals
from functools import partial
from django.contrib.auth import get_user_model
from .default_settings import settings
from . import signals
class AuditMiddleware(object):
"""
middleware to add the user from requests to ModelChange objects.
This is independent of request logging and can be used separately.
"""
def process_request(self, request, *args, **kwargs):
if not settings.CHANGE_LOGGING:
return
user = getattr(request, 'user', None)
if user and not user.is_authenticated():
user = None
# build kwargs to pass to the signal handler
update_kwargs = {}
if user and isinstance(user, get_user_model()):
update_kwargs['user'] = user
if request.META.get('REMOTE_ADDR'):
update_kwargs['remote_addr'] = request.META.get('REMOTE_ADDR')
if request.META.get('REMOTE_HOST'):
update_kwargs['remote_host'] = request.META.get('REMOTE_HOST')
# keep the strong ref on the request, its a sane lifetime
request._handler_func = partial(self.pre_action_handler, update_kwargs=update_kwargs)
signals.audit_presave.connect(request._handler_func, dispatch_uid=(settings.DISPATCH_UID, request,),)
def process_response(self, request, response):
# disconnect signals for this request
# runs even if change logging is disabled in case it was disabled after the signal was created
signals.audit_presave.disconnect(dispatch_uid=(settings.DISPATCH_UID, request,))
return response
def pre_action_handler(self, sender, model_instance, audit_meta, update_kwargs=None, **kwargs):
if audit_meta and getattr(audit_meta, 'audit') and update_kwargs is not None:
audit_meta.update_additional_kwargs(update_kwargs)
|
[
2,
4,
5,
6,
7
] |
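The middleware in this record binds per-request kwargs into a signal handler with functools.partial; a standalone sketch of that binding pattern (names here are illustrative):
from functools import partial

def pre_action_handler(sender, update_kwargs=None, **kwargs):
    print('audit extras:', update_kwargs)

handler = partial(pre_action_handler,
                  update_kwargs={'user': 'alice', 'remote_addr': '127.0.0.1'})
handler(sender=object())  # -> audit extras: {'user': 'alice', ...}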
2,142 |
f5f1a4db33cea8421cb4236606dfb288efee7621
|
<mask token>
@admin.route('/', methods=['GET'])
@login_required
def index():
headers = {'Content-Type': 'text/html'}
return make_response(render_template('index.html'), headers)
<mask token>
@admin.route('/roles/<role_id>', methods=['GET'])
@admin.route('/roles/new', methods=['GET'])
@admin.route('/roles', methods=['GET'])
@login_required
def roles(role_id=None, operation_type=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
roles = [Role()]
operation_type = 'new'
if not operation_type:
roles = list_roles(role_id)
operation_type = 'list' if not role_id else 'edit'
return make_response(render_template('roles.html', roles=roles,
operation_type=operation_type))
|
<mask token>
@admin.route('/', methods=['GET'])
@login_required
def index():
headers = {'Content-Type': 'text/html'}
return make_response(render_template('index.html'), headers)
@admin.route('/clients/<client_id>', methods=['GET'])
@admin.route('/clients/new', methods=['GET'])
@admin.route('/clients', methods=['GET'])
@login_required
def clients(client_id=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
clients = [Client()]
operation_type = 'new'
else:
clients = list_clients(client_id)
operation_type = 'list' if not client_id else 'edit'
return make_response(render_template('clients.html', clients=clients,
operation_type=operation_type))
@admin.route('/roles/<role_id>', methods=['GET'])
@admin.route('/roles/new', methods=['GET'])
@admin.route('/roles', methods=['GET'])
@login_required
def roles(role_id=None, operation_type=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
roles = [Role()]
operation_type = 'new'
if not operation_type:
roles = list_roles(role_id)
operation_type = 'list' if not role_id else 'edit'
return make_response(render_template('roles.html', roles=roles,
operation_type=operation_type))
|
<mask token>
admin = Blueprint('admin', __name__, url_prefix='/passport/admin')
@admin.route('/', methods=['GET'])
@login_required
def index():
headers = {'Content-Type': 'text/html'}
return make_response(render_template('index.html'), headers)
@admin.route('/clients/<client_id>', methods=['GET'])
@admin.route('/clients/new', methods=['GET'])
@admin.route('/clients', methods=['GET'])
@login_required
def clients(client_id=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
clients = [Client()]
operation_type = 'new'
else:
clients = list_clients(client_id)
operation_type = 'list' if not client_id else 'edit'
return make_response(render_template('clients.html', clients=clients,
operation_type=operation_type))
@admin.route('/roles/<role_id>', methods=['GET'])
@admin.route('/roles/new', methods=['GET'])
@admin.route('/roles', methods=['GET'])
@login_required
def roles(role_id=None, operation_type=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
roles = [Role()]
operation_type = 'new'
if not operation_type:
roles = list_roles(role_id)
operation_type = 'list' if not role_id else 'edit'
return make_response(render_template('roles.html', roles=roles,
operation_type=operation_type))
|
from flask import Blueprint, make_response, render_template, request
from flask_restful import Resource
from flask_security import login_required
from ..clients.service import list_clients
from ..roles.service import list_roles
from ...models import Client, Role
admin = Blueprint('admin', __name__, url_prefix='/passport/admin')
@admin.route('/', methods=['GET'])
@login_required
def index():
headers = {'Content-Type': 'text/html'}
return make_response(render_template('index.html'), headers)
@admin.route('/clients/<client_id>', methods=['GET'])
@admin.route('/clients/new', methods=['GET'])
@admin.route('/clients', methods=['GET'])
@login_required
def clients(client_id=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
clients = [Client()]
operation_type = 'new'
else:
clients = list_clients(client_id)
operation_type = 'list' if not client_id else 'edit'
return make_response(render_template('clients.html', clients=clients,
operation_type=operation_type))
@admin.route('/roles/<role_id>', methods=['GET'])
@admin.route('/roles/new', methods=['GET'])
@admin.route('/roles', methods=['GET'])
@login_required
def roles(role_id=None, operation_type=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
roles = [Role()]
operation_type = 'new'
if not operation_type:
roles = list_roles(role_id)
operation_type = 'list' if not role_id else 'edit'
return make_response(render_template('roles.html', roles=roles,
operation_type=operation_type))
|
# coding: utf-8
from flask import Blueprint, make_response, render_template, request
from flask_restful import Resource
from flask_security import login_required
from ..clients.service import list_clients
from ..roles.service import list_roles
from ...models import Client, Role
admin = Blueprint('admin', __name__, url_prefix='/passport/admin')
@admin.route('/', methods=['GET'])
@login_required
def index():
headers = {'Content-Type': 'text/html'}
return make_response(render_template(
'index.html'), headers)
@admin.route('/clients/<client_id>', methods=['GET'])
@admin.route('/clients/new', methods=['GET'])
@admin.route('/clients', methods=['GET'])
@login_required
def clients(client_id=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
clients = [Client()]
operation_type = 'new'
else:
clients = list_clients(client_id)
operation_type = 'list' if not client_id else 'edit'
return make_response(render_template(
'clients.html', clients=clients, operation_type=operation_type))
@admin.route('/roles/<role_id>', methods=['GET'])
@admin.route('/roles/new', methods=['GET'])
@admin.route('/roles', methods=['GET'])
@login_required
def roles(role_id=None, operation_type=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
roles = [Role()]
operation_type = 'new'
if not operation_type:
roles = list_roles(role_id)
operation_type = 'list' if not role_id else 'edit'
return make_response(render_template(
'roles.html', roles=roles, operation_type=operation_type))
|
[
2,
3,
4,
5,
6
] |
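clients() and roles() in this record stack several routes on one view with an optional URL parameter; a minimal standalone sketch of that Flask pattern (hypothetical endpoint, not from the record):
from flask import Flask

app = Flask(__name__)

@app.route('/items/<item_id>')
@app.route('/items')
def items(item_id=None):
    return 'list' if item_id is None else 'edit ' + item_id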
2,143 |
9db1887c5379623687d1dea343d72122bab66303
|
<mask token>
|
<mask token>
urlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,
name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]
|
from django.urls import path
from . import views
urlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,
name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]
|
from django.urls import path
from . import views # 현재 패키지에서 views 모듈을 가져옴
urlpatterns = [
path('', views.home, name='home'),
path('ppt1',views.ppt1,name='ppt1'),
path('ppt2',views.ppt2,name='ppt2'),
]
| null |
[
0,
1,
2,
3
] |
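The named paths in this record are typically wired into a project through include(); a hedged sketch of the root urls.py (the app module name 'myapp' is a placeholder, not from the record):
from django.urls import include, path

urlpatterns = [
    path('', include('myapp.urls')),  # 'myapp' is a placeholder app name
]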
2,144 |
b30e6af035b589d5f4bd1bc6cccdd53c157861a0
|
#!/usr/bin/env python
# including libraries
import roslib
import sys
import rospy
import cv2
import math
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import matplotlib.pyplot as plt
MAP = np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,1,1,1,0,1,1,1,1,1,0,1,1,1,1,1,0,1,1,0],[0,1,0,1,0,1,0,0,0,0,0,1,0,0,0,0,0,0,1,0],[0,1,0,1,1,1,0,1,1,1,1,1,0,1,0,1,1,1,1,0],[0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,1,0,0,0,0],[0,1,1,1,1,1,0,1,1,1,0,1,0,1,1,1,1,1,1,0],[0,0,0,0,0,1,0,0,0,1,0,1,0,1,0,0,0,0,1,0],[0,1,1,1,0,1,0,1,1,1,0,1,1,1,0,1,1,1,1,0],[0,1,0,1,0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,0],[0,1,0,1,0,1,0,1,0,1,1,1,0,1,1,1,1,1,1,0],[0,1,0,1,0,1,0,1,0,1,0,1,0,0,0,0,0,0,1,0],[0,1,0,1,1,1,0,1,0,1,0,1,1,1,0,1,1,1,1,0],[0,1,0,0,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0],[0,1,1,1,1,1,0,1,1,1,0,1,0,1,1,1,1,1,1,0],[0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0],[0,1,0,1,1,1,1,1,1,0,1,1,1,0,1,1,1,0,1,0],[0,1,0,1,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0],[0,1,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0],[0,1,0,1,1,1,1,0,1,1,1,0,1,1,1,0,1,1,1,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
position_history = (0,0)
class labyrinth_solver:
def __init__(self):
self.image_pub = rospy.Publisher("final_image",Image)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/usb_cam/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding="bgr8")
except CvBridgeError, e:
print e
# crop out the labyrinth region (y by x)
cv_image = cv_image[22:240, 44:268]
# resize the image to 200x200 each region is 10x10
cv_image = cv2.resize(cv_image, (400, 400))
# transfer the image from RGB to HSV
hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
# Red Ball Segmentation
lower_red = np.array([0,50,150])
upper_red = np.array([50,150,250])
temp_ball = cv2.inRange(hsv_image,lower_red,upper_red)
# Erosion and Dilation processing
kernel = np.ones((3,3),np.uint8)
temp_ball = cv2.dilate(temp_ball,kernel,iterations = 2)
#cv2.imshow("Red Ball", temp_ball)
# Calculate the contour
contours,hierarcy = cv2.findContours(temp_ball,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Select the biggest contout as the target
max_area = 0
for cnt in contours:
area=cv2.contourArea(cnt)
if area > max_area:
max_area=area
target = cnt
global position_history # calling global variable
# handling with target missing
if max_area >= 10:
(x,y),radius = cv2.minEnclosingCircle(target)
center = (int(x),int(y))
else:
center = position_history
# Compensate with some noise
radius = 10
if abs(center[0]-position_history[0])+abs(center[1]-position_history[1])<=4:
center = position_history
cv2.circle(cv_image,center,radius,(0,255,0),2)
position_history = center
cv2.imshow("Ball tracking", cv_image)
# manipulate the center coordinate to be the nearest 10 while extract the position in 20 by 20
# FIRST check who is more close to 0
checkx = center[0]%20-10
checky = center[1]%20-15
if abs(checkx) <= abs(checky):
newx = center[0] - checkx
newy = center[1]*0.955
elif abs(checkx) > abs(checky):
newx = center[0]
newy = 0.955*(center[1] - checky)
newcenter = (newx, int(newy))
# read the reference map for animation
map_ref = cv2.imread('/home/sunyue/catkin_ws/src/tracking/map.png')
cv2.circle(map_ref,newcenter,radius,(0,0,255),-5)
# SECOND transfer the real location to the 20x20 grid
gridx = newcenter[0]/20+1
gridy = newcenter[1]/20+1
# A* for path planning
goal = [10,2]
current = [gridx, gridy]
precheck = abs(current[0]-goal[0])+abs(current[1]-goal[1])
if precheck == 0: check = 0
else: check = 100
path = np.array([current])
backup = np.array([[0,0,0,0]])
while check!=0:
# generate the potential candidate
north = [current[0],current[1]-1]
south = [current[0],current[1]+1]
east = [current[0]+1,current[1]]
west = [current[0]-1,current[1]]
#print current
# calculate the heuristic
n_heuristic = math.sqrt(pow(north[0]-goal[0],2)+pow(north[1]-goal[1],2))
s_heuristic = math.sqrt(pow(south[0]-goal[0],2)+pow(south[1]-goal[1],2))
e_heuristic = math.sqrt(pow(east[0]-goal[0],2)+pow(east[1]-goal[1],2))
w_heuristic = math.sqrt(pow(west[0]-goal[0],2)+pow(west[1]-goal[1],2))
# check the punishment of obstacle
if MAP[north[1]-1,north[0]-1]==0: n_punish = 2000
else: n_punish = 0
if MAP[south[1]-1,south[0]-1]==0: s_punish = 2000
else: s_punish = 0
if MAP[east[1]-1,east[0]-1]==0: e_punish = 2000
else: e_punish = 0
if MAP[west[1]-1,west[0]-1]==0: w_punish = 2000
else: w_punish = 0
#print n_punish, s_punish, e_punish, w_punish
# check last node never go back
num = path.shape[0] # get the path step number
if num!=1:
last_step = path[-2]
n_check = north - last_step
s_check = south - last_step
e_check = east - last_step
w_check = west - last_step
if ( n_check[0]==0 and n_check[1]==0): n_punish = 2000
if ( s_check[0]==0 and s_check[1]==0): s_punish = 2000
if ( e_check[0]==0 and e_check[1]==0): e_punish = 2000
if ( w_check[0]==0 and w_check[1]==0): w_punish = 2000
# sum the cost together
n_cost = int(n_heuristic + n_punish)
s_cost = int(s_heuristic + s_punish)
e_cost = int(e_heuristic + e_punish)
w_cost = int(w_heuristic + w_punish)
cost = [n_cost, s_cost, e_cost, w_cost]
		# several situations need to be considered here
index = np.argmin(cost) # where the smallest cost is located
mincost = cost[index]
		# First case: the minimum cost is under 1000, so at least one direction is viable
if mincost<=1000: # there must be at least one solution
sumcheck = cost[0]+cost[1]+cost[2]+cost[3]
if sumcheck >= 6000: # only one next choice
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
# update the path
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
				# update the current
current = next
			elif (sumcheck >= 4000 and sumcheck < 6000) : # two possible choices
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
				# update the path with the choice that has the least cost
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# save the branch to the back up [current, branch]
fakecost = cost
				fakecost[index] = 2000 # manually mask out the minimum-cost choice
fakeindex = np.argmin(fakecost) # where the smallest cost is located
if fakeindex == 0: branch = north
elif fakeindex == 1: branch = south
elif fakeindex == 2: branch = east
elif fakeindex == 3: branch = west
backup = np.append([[current[0],current[1],branch[0],branch[1]]], backup, axis=0)
				# update the current
current = next
			elif (sumcheck >= 2000 and sumcheck < 4000) : # three possible choices
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
				# update the path with the choice that has the least cost
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# save the branch to the back up [current, branch]
# second cost
secondcost = cost
				secondcost[index] = 2000 # manually mask out the minimum-cost choice
secondindex = np.argmin(secondcost) # where the smallest cost is located
if secondindex == 0: branch1 = north
elif secondindex == 1: branch1 = south
elif secondindex == 2: branch1 = east
elif secondindex == 3: branch1 = west
thirdcost = secondcost
				thirdcost[secondindex] = 2000 # manually mask out the next-best choice
thirdindex = np.argmin(thirdcost) # where the smallest cost is located
if thirdindex == 0: branch2 = north
elif thirdindex == 1: branch2 = south
elif thirdindex == 2: branch2 = east
elif thirdindex == 3: branch2 = west
# update branch based on cost difference
backup = np.append([[current[0],current[1],branch2[0],branch2[1]]], backup, axis=0)
backup = np.append([[current[0],current[1],branch1[0],branch1[1]]], backup, axis=0)
				# update the current
current = next
		elif mincost>=2000: # no next choice here, so fall back to the backup branches
			# the next step is the highest-ranked backup branch
next = [backup[0,2],backup[0,3]]
# cut the path back
current = [backup[0,0],backup[0,1]]
compare = abs(path-current)
summation = sum(np.transpose(compare))
index = np.argmin(summation)
# cut the path from 0 to current one
path = path[:index+1]
# update the path with next step
path = np.append(path,[next],axis=0)
# delete the first backup
backup = backup[1:]
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
			# update the current
current = next
	# A* algorithm finished
steps = path.shape[0]
i = 0
while i < steps-1:
cv2.line(map_ref,(20*path[i,0]-10,20*path[i,1]-10),(20*path[i+1,0]-10,20*path[i+1,1]-10),(255,0,0),3)
i = i+1
cv2.imshow("Map Image", map_ref)
cv2.waitKey(1)
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, encoding="bgr8"))
		except CvBridgeError as e:
			print(e)
def main(args):
ic = labyrinth_solver()
rospy.init_node('labyrinth_solver', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print "Shutting down"
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| null | null | null | null |
[
0
] |
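The search loop in the labyrinth-solver record above is labeled A* but, as written, behaves more like a greedy best-first walk with hand-managed backup branches: it expands on heuristic plus obstacle penalty, with no accumulated g-cost. For contrast, here is a minimal textbook A* over the same kind of 20x20 occupancy grid. This is a sketch with hypothetical names (`astar`, `grid`), using 0-based (x, y) coordinates rather than the 1-based grid above.

import heapq

def astar(grid, start, goal, size=20):
    # grid[y][x] == 0 marks a wall; start and goal are 0-based (x, y) tuples
    def h(p):
        return abs(p[0] - goal[0]) + abs(p[1] - goal[1])  # Manhattan heuristic
    open_heap = [(h(start), 0, start)]   # entries are (f, g, node)
    came_from = {start: None}
    g_score = {start: 0}
    while open_heap:
        _, g, current = heapq.heappop(open_heap)
        if current == goal:
            path = []
            while current is not None:   # walk parents back to the start
                path.append(current)
                current = came_from[current]
            return path[::-1]
        for dx, dy in ((0, -1), (0, 1), (1, 0), (-1, 0)):
            nxt = (current[0] + dx, current[1] + dy)
            if not (0 <= nxt[0] < size and 0 <= nxt[1] < size):
                continue                 # off the board
            if grid[nxt[1]][nxt[0]] == 0:
                continue                 # wall
            tentative = g + 1
            if tentative < g_score.get(nxt, float('inf')):
                g_score[nxt] = tentative
                came_from[nxt] = current
                heapq.heappush(open_heap, (tentative + h(nxt), tentative, nxt))
    return None                          # goal unreachable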
2,145 |
ba2f8598ec7e107ac71786cf9191777a93ae2c7a
|
<mask token>
|
<mask token>
for i in sys.stdin:
i = float(i)
key = math.floor(i * 10)
print('%s\t%s' % (key, i))
|
import os
import sys
import csv
import math
for i in sys.stdin:
i = float(i)
key = math.floor(i * 10)
print('%s\t%s' % (key, i))
| null | null |
[
0,
1,
2
] |
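The cell above reads floats from stdin and prints floor(x * 10) as a tab-separated key, the usual Hadoop Streaming mapper shape. Assuming that framing (the record itself does not say so), a matching reducer that counts values per bucket could look like the sketch below; streaming sorts mapper output by key before the reducer sees it.

import sys

current_key = None
count = 0
for line in sys.stdin:
    key, _, value = line.strip().partition('\t')
    if key != current_key:
        if current_key is not None:
            print('%s\t%d' % (current_key, count))  # emit the finished bucket
        current_key, count = key, 0
    count += 1
if current_key is not None:
    print('%s\t%d' % (current_key, count))          # flush the last bucket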
2,146 |
d296e528d399ee772039777d139a1d8271711ee9
|
<mask token>
class AssessmentList(ListView):
model = Assessment
class AssessmentDetail(DetailView):
model = Assessment
class AnswerQuestions(ListView):
model = Question
def post(self, request):
company, mine, assessment = self.get_assessment(request)
for key, value in request.POST.items():
print(key, value)
self.create_response(key, value, assessment)
self.add_null_responses(assessment)
messages.success(request, 'Assessment Received; Thank You!')
return redirect(reverse('assessment_detail', kwargs={'pk':
assessment.id}))
def get_assessment(self, request):
company, created = Company.objects.get_or_create(name=request.POST.
get('company'))
mine, created = Mine.objects.get_or_create(name=request.POST.get(
'mine'), company=company, location=request.POST.get('location'))
assessment = Assessment.objects.create(mine=mine)
if request.user.is_authenticated:
assessment.user = request.user
assessment.save()
return company, mine, assessment
def create_response(self, key, value, assessment):
try:
question = Question.objects.get(id=int(key))
response = Response.objects.create(question=question, response=
self.get_response(value), assessment=assessment)
except Exception as error:
print(error)
def get_response(self, response):
if response == 'True':
return True
else:
return False
def add_null_responses(self, assessment):
remaining_questions = Question.objects.exclude(response__assessment
=assessment).distinct()
for question in remaining_questions:
Response.objects.create(assessment=assessment, question=question)
|
<mask token>
class MineList(ListView):
<mask token>
<mask token>
class MineDetail(DetailView):
model = Mine
class AssessmentList(ListView):
model = Assessment
class AssessmentDetail(DetailView):
model = Assessment
class AnswerQuestions(ListView):
model = Question
def post(self, request):
company, mine, assessment = self.get_assessment(request)
for key, value in request.POST.items():
print(key, value)
self.create_response(key, value, assessment)
self.add_null_responses(assessment)
messages.success(request, 'Assessment Received; Thank You!')
return redirect(reverse('assessment_detail', kwargs={'pk':
assessment.id}))
def get_assessment(self, request):
company, created = Company.objects.get_or_create(name=request.POST.
get('company'))
mine, created = Mine.objects.get_or_create(name=request.POST.get(
'mine'), company=company, location=request.POST.get('location'))
assessment = Assessment.objects.create(mine=mine)
if request.user.is_authenticated:
assessment.user = request.user
assessment.save()
return company, mine, assessment
def create_response(self, key, value, assessment):
try:
question = Question.objects.get(id=int(key))
response = Response.objects.create(question=question, response=
self.get_response(value), assessment=assessment)
except Exception as error:
print(error)
def get_response(self, response):
if response == 'True':
return True
else:
return False
def add_null_responses(self, assessment):
remaining_questions = Question.objects.exclude(response__assessment
=assessment).distinct()
for question in remaining_questions:
Response.objects.create(assessment=assessment, question=question)
|
<mask token>
class MineList(ListView):
model = Mine
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['maps_api_key'] = settings.GOOGLEMAPS_API_KEY
return context
class MineDetail(DetailView):
model = Mine
class AssessmentList(ListView):
model = Assessment
class AssessmentDetail(DetailView):
model = Assessment
class AnswerQuestions(ListView):
model = Question
def post(self, request):
company, mine, assessment = self.get_assessment(request)
for key, value in request.POST.items():
print(key, value)
self.create_response(key, value, assessment)
self.add_null_responses(assessment)
messages.success(request, 'Assessment Received; Thank You!')
return redirect(reverse('assessment_detail', kwargs={'pk':
assessment.id}))
def get_assessment(self, request):
company, created = Company.objects.get_or_create(name=request.POST.
get('company'))
mine, created = Mine.objects.get_or_create(name=request.POST.get(
'mine'), company=company, location=request.POST.get('location'))
assessment = Assessment.objects.create(mine=mine)
if request.user.is_authenticated:
assessment.user = request.user
assessment.save()
return company, mine, assessment
def create_response(self, key, value, assessment):
try:
question = Question.objects.get(id=int(key))
response = Response.objects.create(question=question, response=
self.get_response(value), assessment=assessment)
except Exception as error:
print(error)
def get_response(self, response):
if response == 'True':
return True
else:
return False
def add_null_responses(self, assessment):
remaining_questions = Question.objects.exclude(response__assessment
=assessment).distinct()
for question in remaining_questions:
Response.objects.create(assessment=assessment, question=question)
|
<mask token>
class Home(View):
<mask token>
class MineList(ListView):
model = Mine
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['maps_api_key'] = settings.GOOGLEMAPS_API_KEY
return context
class MineDetail(DetailView):
model = Mine
class AssessmentList(ListView):
model = Assessment
class AssessmentDetail(DetailView):
model = Assessment
class AnswerQuestions(ListView):
model = Question
def post(self, request):
company, mine, assessment = self.get_assessment(request)
for key, value in request.POST.items():
print(key, value)
self.create_response(key, value, assessment)
self.add_null_responses(assessment)
messages.success(request, 'Assessment Received; Thank You!')
return redirect(reverse('assessment_detail', kwargs={'pk':
assessment.id}))
def get_assessment(self, request):
company, created = Company.objects.get_or_create(name=request.POST.
get('company'))
mine, created = Mine.objects.get_or_create(name=request.POST.get(
'mine'), company=company, location=request.POST.get('location'))
assessment = Assessment.objects.create(mine=mine)
if request.user.is_authenticated:
assessment.user = request.user
assessment.save()
return company, mine, assessment
def create_response(self, key, value, assessment):
try:
question = Question.objects.get(id=int(key))
response = Response.objects.create(question=question, response=
self.get_response(value), assessment=assessment)
except Exception as error:
print(error)
def get_response(self, response):
if response == 'True':
return True
else:
return False
def add_null_responses(self, assessment):
remaining_questions = Question.objects.exclude(response__assessment
=assessment).distinct()
for question in remaining_questions:
Response.objects.create(assessment=assessment, question=question)
|
from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView, View
from assessments.models import (Mine, Company,
QuestionCategory, Question, Assessment, Response)
class Home(View):
def get(self, request):
return render(request, 'home.html')
class MineList(ListView):
model = Mine
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['maps_api_key'] = settings.GOOGLEMAPS_API_KEY
return context
class MineDetail(DetailView):
model = Mine
class AssessmentList(ListView):
model = Assessment
class AssessmentDetail(DetailView):
model = Assessment
class AnswerQuestions(ListView):
model = Question
def post(self, request):
company, mine, assessment = self.get_assessment(
request)
for key, value in request.POST.items():
print(key, value)
self.create_response(key, value, assessment)
self.add_null_responses(assessment)
messages.success(request,
'Assessment Received; Thank You!')
return redirect(reverse('assessment_detail',
kwargs={'pk':assessment.id}))
def get_assessment(self, request):
company, created = Company.objects.get_or_create(
name=request.POST.get('company')
)
mine, created = Mine.objects.get_or_create(
name=request.POST.get('mine'),
company=company,
location=request.POST.get('location')
)
assessment = Assessment.objects.create(
mine=mine,
)
if request.user.is_authenticated:
            assessment.user = request.user
assessment.save()
return company, mine, assessment
def create_response(self, key, value, assessment):
try:
question = Question.objects.get(id=int(key))
response = Response.objects.create(
question=question,
response=self.get_response(value),
assessment=assessment
)
except Exception as error:
print(error)
def get_response(self, response):
if response == 'True':
return True
else:
return False
def add_null_responses(self, assessment):
remaining_questions = Question.objects.exclude(
response__assessment=assessment).distinct()
for question in remaining_questions:
Response.objects.create(
assessment=assessment,
question=question,
)
|
[
11,
14,
16,
17,
20
] |
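The view in this record leans on `get_or_create` so that resubmitting the same company or mine reuses the existing row rather than inserting a duplicate. A minimal illustration of that contract, as a sketch against the models imported above (run inside the project's `manage.py shell`, assuming an initially empty table; the name 'Acme Mining' is made up):

from assessments.models import Company

first, created_first = Company.objects.get_or_create(name='Acme Mining')
again, created_again = Company.objects.get_or_create(name='Acme Mining')
assert created_first is True    # first call inserts the row (empty table assumed)
assert created_again is False   # second call fetches the same row
assert first.pk == again.pk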
2,147 |
64368679aa2e387e25a36b2f3d0312a99b819e95
|
<mask token>
|
<mask token>
api.main()
|
from xrouter import api
api.main()
|
#!/usr/bin/env python
from xrouter import api
api.main()
| null |
[
0,
1,
2,
3
] |
2,148 |
49995e60b817e2c5a2ea7e85e4fe96ca95363cb2
|
<mask token>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
    print('Scores:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
<mask token>
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
<mask token>
|
<mask token>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
    print('Scores:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
def grid_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)
cls.fit(X_train, y_train)
    print('Best estimator by GridSearchCV:')
print(cls.best_estimator_)
<mask token>
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
<mask token>
|
<mask token>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
    print('Scores:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
def grid_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)
cls.fit(X_train, y_train)
    print('Best estimator by GridSearchCV:')
print(cls.best_estimator_)
def test_SVC_sigmod(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
gammas = range(1, 2)
train_scores = []
test_scores = []
for gamma in gammas:
cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 1, 1)
ax.plot(gammas, train_scores, label='Training score ', marker='+')
ax.plot(gammas, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_sigmoid_gamma ')
ax.set_xscale('log')
ax.set_xlabel('$\\gamma$')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
<mask token>
|
<mask token>
def test_linearSVC(*data):
X_train, X_test, y_train, y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train, X_test, y_train, y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s' % (cls.coef_, cls.intercept_))
    print('Scores:%.2f' % cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
degrees = range(1, 2)
train_scores = []
test_scores = []
for degree in degrees:
cls = svm.SVC(kernel='poly', degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 3, 1)
ax.plot(degrees, train_scores, label='Training score ', marker='+')
ax.plot(degrees, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_poly_degree ')
ax.set_xlabel('p')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
cls = svm.SVC(C=1000.0, kernel='rbf', gamma=0.1, probability=True)
cls.fit(X_train, y_train)
    print('Scores:%.2f' % cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
def grid_SVC_rbf(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
param_grid = {'C': [1000.0, 5000.0, 10000.0, 50000.0, 100000.0],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'), param_grid)
cls.fit(X_train, y_train)
    print('Best estimator by GridSearchCV:')
print(cls.best_estimator_)
def test_SVC_sigmod(*data):
X_train, X_test, y_train, y_test = data
fig = plt.figure()
gammas = range(1, 2)
train_scores = []
test_scores = []
for gamma in gammas:
cls = svm.SVC(kernel='sigmoid', gamma=gamma, coef0=0)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f' % cls.score(X_test, y_test))
ax = fig.add_subplot(1, 1, 1)
ax.plot(gammas, train_scores, label='Training score ', marker='+')
ax.plot(gammas, test_scores, label='Testing score ', marker='o')
ax.set_title('SVC_sigmoid_gamma ')
ax.set_xscale('log')
ax.set_xlabel('$\\gamma$')
ax.set_ylabel('score')
ax.set_ylim(0, 1.05)
ax.legend(loc='best', framealpha=0.5)
plt.show()
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST, target_dtype=np.
int16, features_dtype=np.float32, target_column=0)
test_SVC_sigmod(train_datas.data, test_datas.data, train_datas.target,
test_datas.target)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.01, help=
'Initial learning rate.')
parser.add_argument('--max_steps', type=int, default=100000, help=
'Number of steps to run trainer.')
parser.add_argument('--percentage', type=float, default=0.99, help=
'Number of float for pca remain percentage.')
parser.add_argument('--hidden2', type=int, default=32, help=
'Number of units in hidden layer 2.')
parser.add_argument('--batch_size', type=int, default=1, help=
'Batch size. Must divide evenly into the dataset sizes.')
parser.add_argument('--input_data_dir', type=str, default=
'/home/freebirdweij/tf_works/invest', help=
'Directory to put the input data.')
parser.add_argument('--log_dir', type=str, default=
'/home/freebirdweij/tf_works/invest/logs', help=
'Directory to put the log data.')
parser.add_argument('--fake_data', default=False, help=
'If true, uses fake data for unit testing.', action='store_true')
FLAGS, unparsed = parser.parse_known_args()
main()
|
'''
Created on 2018-9-8
@author: weij
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
import numpy as np
from numpy import shape
from scipy import linalg
from sklearn import datasets,linear_model,cross_validation,svm
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import com.freebirdweij.goldanalyse.ml.data_util as base
import matplotlib.pyplot as plt
def test_linearSVC(*data):
X_train,X_test,y_train,y_test = data
cls = svm.LinearSVC()
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))
    print('Scores:%.2f'%cls.score(X_test, y_test))
def test_SVC_linear(*data):
X_train,X_test,y_train,y_test = data
cls = svm.SVC(kernel='linear')
cls.fit(X_train, y_train)
print('Coefficients:%s,Intercept:%s'%(cls.coef_,cls.intercept_))
    print('Scores:%.2f'%cls.score(X_test, y_test))
def test_SVC_poly(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
degrees = range(1,2)
train_scores=[]
test_scores=[]
for degree in degrees:
cls = svm.SVC(kernel='poly',degree=degree)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f'%cls.score(X_test, y_test))
ax=fig.add_subplot(1,3,1)
ax.plot(degrees,train_scores,label="Training score ",marker='+')
ax.plot(degrees,test_scores,label="Testing score ",marker='o')
ax.set_title("SVC_poly_degree ")
ax.set_xlabel("p")
ax.set_ylabel("score")
ax.set_ylim(0,1.05)
ax.legend(loc="best",framealpha=0.5)
plt.show()
def test_SVC_rbf(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
#gammas = range(1,2)
#train_scores=[]
#test_scores=[]
#for gamma in gammas:
cls = svm.SVC(C=1e3,kernel='rbf',gamma=0.1,probability=True)
cls.fit(X_train, y_train)
#train_scores.append(cls.score(X_train, y_train))
#test_scores.append(cls.score(X_test, y_test))
    print('Scores:%.2f'%cls.score(X_test, y_test))
print('probability')
print(cls.predict(X_test))
return cls.predict_proba(X_test)
#ax=fig.add_subplot(1,1,1)
#ax.plot(gammas,train_scores,label="Training score ",marker='+')
#ax.plot(gammas,test_scores,label="Testing score ",marker='o')
#ax.set_title("SVC_rbf ")
#ax.set_xlabel(r"$\gamma$")
#ax.set_ylabel("score")
#ax.set_ylim(0,1.05)
#ax.legend(loc="best",framealpha=0.5)
#plt.show()
def grid_SVC_rbf(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
param_grid = {'C':[1e3,5e3,1e4,5e4,1e5],
'gamma':[0.0001,0.0005,0.001,0.005,0.01,0.1]}
cls = GridSearchCV(svm.SVC(kernel='rbf'),param_grid)
cls.fit(X_train, y_train)
    print('Best estimator by GridSearchCV:')
print(cls.best_estimator_)
def test_SVC_sigmod(*data):
X_train,X_test,y_train,y_test = data
fig = plt.figure()
### test degree ###
gammas = range(1,2)
train_scores=[]
test_scores=[]
for gamma in gammas:
cls = svm.SVC(kernel='sigmoid',gamma=gamma,coef0=0)
cls.fit(X_train, y_train)
train_scores.append(cls.score(X_train, y_train))
test_scores.append(cls.score(X_test, y_test))
        print('Scores:%.2f'%cls.score(X_test, y_test))
ax=fig.add_subplot(1,1,1)
ax.plot(gammas,train_scores,label="Training score ",marker='+')
ax.plot(gammas,test_scores,label="Testing score ",marker='o')
ax.set_title("SVC_sigmoid_gamma ")
ax.set_xscale("log")
ax.set_xlabel(r"$\gamma$")
ax.set_ylabel("score")
ax.set_ylim(0,1.05)
ax.legend(loc="best",framealpha=0.5)
plt.show()
def main():
DATA_TRAIN = 'train-autd365-2018-8-31-day-high100-round-select2-0split.csv'
DATA_TEST = 'test-autd365-2018-8-31-day-high100-round-select2-0split.csv'
train_datas = base.load_csv_without_header(DATA_TRAIN,target_dtype=np.int16,
features_dtype=np.float32,target_column=0)
test_datas = base.load_csv_without_header(DATA_TEST,target_dtype=np.int16,
features_dtype=np.float32,target_column=0)
test_SVC_sigmod(train_datas.data,test_datas.data,train_datas.target,test_datas.target)
#pro_date = test_SVC_rbf(train_datas.data,test_datas.data,train_datas.target,test_datas.target)
#dataMat = input_datas.data
#print('dataMat:-----------------------')
#print(dataMat)
#pcaData = np.dot(dataMat,eig_vect)
#reconMat = np.dot(pcaData,eig_vect.T)+mean_v #Reconstructed datas.
#print('k:-----------------------')
#print(k)
#print('pcaData:-----------------------')
#print(pcaData)
#print('reconMat:-----------------------')
#print(reconMat)
#base.write_a_dataset_to_a_csv('audt365-2018-2-21-day-class21-high100-round-test-svm.csv', pro_date)
#base.write_a_dataset_to_a_csv('hjxh365-2018-4-16-day-plus-norm-clear-pca9999-recn.csv', reconMat)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--learning_rate',
type=float,
default=0.01,
help='Initial learning rate.'
)
parser.add_argument(
'--max_steps',
type=int,
default=100000,
help='Number of steps to run trainer.'
)
parser.add_argument(
'--percentage',
type=float,
default=0.99,
help='Number of float for pca remain percentage.'
)
parser.add_argument(
'--hidden2',
type=int,
default=32,
help='Number of units in hidden layer 2.'
)
parser.add_argument(
'--batch_size',
type=int,
default=1,
help='Batch size. Must divide evenly into the dataset sizes.'
)
parser.add_argument(
'--input_data_dir',
type=str,
default='/home/freebirdweij/tf_works/invest',
help='Directory to put the input data.'
)
parser.add_argument(
'--log_dir',
type=str,
default='/home/freebirdweij/tf_works/invest/logs',
help='Directory to put the log data.'
)
parser.add_argument(
'--fake_data',
default=False,
help='If true, uses fake data for unit testing.',
action='store_true'
)
FLAGS, unparsed = parser.parse_known_args()
main()
|
[
5,
6,
7,
8,
10
] |
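This record imports GridSearchCV from `sklearn.grid_search` and uses `sklearn.cross_validation`; both modules were removed in scikit-learn 0.20. A sketch of the equivalent search against the maintained `sklearn.model_selection` API, using synthetic data in place of the record's CSV files:

import numpy as np
from sklearn import svm
from sklearn.model_selection import GridSearchCV, train_test_split

# synthetic stand-in for the record's CSV inputs
X = np.random.RandomState(0).rand(200, 5)
y = (X[:, 0] > 0.5).astype(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 1e-1]}
search = GridSearchCV(svm.SVC(kernel='rbf'), param_grid, cv=3)
search.fit(X_train, y_train)
print(search.best_estimator_)
print('Score: %.2f' % search.score(X_test, y_test))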
2,149 |
16215ee42c4ea284dca0ebb7372fef04c0cc54b9
|
<mask token>
def segment_ts():
ts_len = len(x1)
mod = ts_len % window_size
rnge = 0
if skip_offset == 0:
ts_len = int((ts_len - mod - window_size) / 1)
rnge = int(ts_len / window_size)
else:
ts_len = int(math.ceil((ts_len - window_size) / skip_offset))
rnge = int(ts_len)
curr_count = 0
words = list()
indices = list()
complete_indices = list()
for i in range(0, rnge):
sub_section = x1[curr_count:curr_count + window_size]
sub_section = normalize(sub_section)
curr_word = ''
chunk_size = int(len(sub_section) / word_lenth)
num = 0
curr_letter = ''
for j in range(0, word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_letter = alphabetize_ts(chunk)
curr_word += str(curr_letter)
complete_indices.append(curr_count)
num += chunk_size
words.append(curr_word)
indices.append(curr_count)
temp_list = []
temp_list.append(sub_section)
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=temp_list)
temp_df.insert(loc=0, column='keys', value=curr_word)
temp_df.insert(loc=0, column='position', value=sorted(sub_section)[
len(sub_section) // 2])
temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))
temp_df.insert(loc=0, column='indices', value=curr_count)
curr_count = curr_count + skip_offset - 1
if i == 0:
df_sax = temp_df.copy()
else:
df_sax = df_sax.append(temp_df, ignore_index=True)
return words, indices, df_sax
<mask token>
def complete_word():
complete_word = list()
complete_indices = indices
""" Simillar Words """
complete_word = alphabetize
sax = defaultdict(list)
for i in range(0, len(complete_word)):
if len(complete_word[i]) == word_lenth:
sax[complete_word[i]].append(complete_indices[i])
return sax
def Compare_Shape():
simillar_word = complete_word()
map_keys = defaultdict(list)
map_indices = defaultdict(list)
for key_i in simillar_word:
temp_list = list()
temp_list.append(simillar_word.get(key_i))
map_keys[key_i].append(key_i)
for key_j in simillar_word:
dist = hamming_distance(key_i, key_j)
if dist == ham_distance and key_i != key_j:
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
else:
map_keys[key_i].append([])
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return map_keys, map_indices
<mask token>
def dtw_test2():
df_dtw_prep = df_sax
dtw_df = pd.DataFrame()
for k, v in compare_list.items():
v_temp = str(v)[2:-2]
v1 = [int(s) for s in v_temp.split(',')]
for i in range(0, len(v1) - 1):
for j in range(i, len(v1)):
if v1[i] != v1[j]:
row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]
row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]
sub_section1 = row1.iloc[0]['sub_section']
sub_section2 = row2.iloc[0]['sub_section']
index1 = row1.iloc[0]['indices']
index2 = row2.iloc[0]['indices']
x = np.array(sub_section1).reshape(-1, 1)
y = np.array(sub_section2).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x,
y, dist=euclidean_norm)
temp_df = pd.DataFrame([[k, index1, index2,
sub_section1, sub_section2, dtw_value]], columns=[
'keyy', 'index1', 'index2', 'sub_section1',
'sub_section2', 'dtw_value'])
dtw_df = dtw_df.append(temp_df, ignore_index=True)
return dtw_df
<mask token>
|
<mask token>
def segment_ts():
ts_len = len(x1)
mod = ts_len % window_size
rnge = 0
if skip_offset == 0:
ts_len = int((ts_len - mod - window_size) / 1)
rnge = int(ts_len / window_size)
else:
ts_len = int(math.ceil((ts_len - window_size) / skip_offset))
rnge = int(ts_len)
curr_count = 0
words = list()
indices = list()
complete_indices = list()
for i in range(0, rnge):
sub_section = x1[curr_count:curr_count + window_size]
sub_section = normalize(sub_section)
curr_word = ''
chunk_size = int(len(sub_section) / word_lenth)
num = 0
curr_letter = ''
for j in range(0, word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_letter = alphabetize_ts(chunk)
curr_word += str(curr_letter)
complete_indices.append(curr_count)
num += chunk_size
words.append(curr_word)
indices.append(curr_count)
temp_list = []
temp_list.append(sub_section)
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=temp_list)
temp_df.insert(loc=0, column='keys', value=curr_word)
temp_df.insert(loc=0, column='position', value=sorted(sub_section)[
len(sub_section) // 2])
temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))
temp_df.insert(loc=0, column='indices', value=curr_count)
curr_count = curr_count + skip_offset - 1
if i == 0:
df_sax = temp_df.copy()
else:
df_sax = df_sax.append(temp_df, ignore_index=True)
return words, indices, df_sax
<mask token>
def complete_word():
complete_word = list()
complete_indices = indices
""" Simillar Words """
complete_word = alphabetize
sax = defaultdict(list)
for i in range(0, len(complete_word)):
if len(complete_word[i]) == word_lenth:
sax[complete_word[i]].append(complete_indices[i])
return sax
def Compare_Shape():
simillar_word = complete_word()
map_keys = defaultdict(list)
map_indices = defaultdict(list)
for key_i in simillar_word:
temp_list = list()
temp_list.append(simillar_word.get(key_i))
map_keys[key_i].append(key_i)
for key_j in simillar_word:
dist = hamming_distance(key_i, key_j)
if dist == ham_distance and key_i != key_j:
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
else:
map_keys[key_i].append([])
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return map_keys, map_indices
<mask token>
def dtw_test2():
df_dtw_prep = df_sax
dtw_df = pd.DataFrame()
for k, v in compare_list.items():
v_temp = str(v)[2:-2]
v1 = [int(s) for s in v_temp.split(',')]
for i in range(0, len(v1) - 1):
for j in range(i, len(v1)):
if v1[i] != v1[j]:
row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]
row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]
sub_section1 = row1.iloc[0]['sub_section']
sub_section2 = row2.iloc[0]['sub_section']
index1 = row1.iloc[0]['indices']
index2 = row2.iloc[0]['indices']
x = np.array(sub_section1).reshape(-1, 1)
y = np.array(sub_section2).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x,
y, dist=euclidean_norm)
temp_df = pd.DataFrame([[k, index1, index2,
sub_section1, sub_section2, dtw_value]], columns=[
'keyy', 'index1', 'index2', 'sub_section1',
'sub_section2', 'dtw_value'])
dtw_df = dtw_df.append(temp_df, ignore_index=True)
return dtw_df
<mask token>
print('Time: ', stop - start)
|
<mask token>
start = timeit.default_timer()
data = pd.read_csv('test_data2.csv', sep=',', header=None)
x1 = data.iloc[1:, 1].values.flatten()
x1 = np.asfarray(x1, float)
y_alphabet_size = 4
word_lenth = 3
window_size = round(len(x1) * 10 / 100)
skip_offset = round(window_size / 2)
ham_distance = 1
epsilon = 1e-06
def segment_ts():
ts_len = len(x1)
mod = ts_len % window_size
rnge = 0
if skip_offset == 0:
ts_len = int((ts_len - mod - window_size) / 1)
rnge = int(ts_len / window_size)
else:
ts_len = int(math.ceil((ts_len - window_size) / skip_offset))
rnge = int(ts_len)
curr_count = 0
words = list()
indices = list()
complete_indices = list()
for i in range(0, rnge):
sub_section = x1[curr_count:curr_count + window_size]
sub_section = normalize(sub_section)
curr_word = ''
chunk_size = int(len(sub_section) / word_lenth)
num = 0
curr_letter = ''
for j in range(0, word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_letter = alphabetize_ts(chunk)
curr_word += str(curr_letter)
complete_indices.append(curr_count)
num += chunk_size
words.append(curr_word)
indices.append(curr_count)
temp_list = []
temp_list.append(sub_section)
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=temp_list)
temp_df.insert(loc=0, column='keys', value=curr_word)
temp_df.insert(loc=0, column='position', value=sorted(sub_section)[
len(sub_section) // 2])
temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))
temp_df.insert(loc=0, column='indices', value=curr_count)
curr_count = curr_count + skip_offset - 1
if i == 0:
df_sax = temp_df.copy()
else:
df_sax = df_sax.append(temp_df, ignore_index=True)
return words, indices, df_sax
alphabetize, indices, df_sax = segment_ts()
<mask token>
def complete_word():
complete_word = list()
complete_indices = indices
""" Simillar Words """
complete_word = alphabetize
sax = defaultdict(list)
for i in range(0, len(complete_word)):
if len(complete_word[i]) == word_lenth:
sax[complete_word[i]].append(complete_indices[i])
return sax
def Compare_Shape():
simillar_word = complete_word()
map_keys = defaultdict(list)
map_indices = defaultdict(list)
for key_i in simillar_word:
temp_list = list()
temp_list.append(simillar_word.get(key_i))
map_keys[key_i].append(key_i)
for key_j in simillar_word:
dist = hamming_distance(key_i, key_j)
if dist == ham_distance and key_i != key_j:
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
else:
map_keys[key_i].append([])
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return map_keys, map_indices
compare_strings, compare_list = Compare_Shape()
def dtw_test2():
df_dtw_prep = df_sax
dtw_df = pd.DataFrame()
for k, v in compare_list.items():
v_temp = str(v)[2:-2]
v1 = [int(s) for s in v_temp.split(',')]
for i in range(0, len(v1) - 1):
for j in range(i, len(v1)):
if v1[i] != v1[j]:
row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]
row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]
sub_section1 = row1.iloc[0]['sub_section']
sub_section2 = row2.iloc[0]['sub_section']
index1 = row1.iloc[0]['indices']
index2 = row2.iloc[0]['indices']
x = np.array(sub_section1).reshape(-1, 1)
y = np.array(sub_section2).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x,
y, dist=euclidean_norm)
temp_df = pd.DataFrame([[k, index1, index2,
sub_section1, sub_section2, dtw_value]], columns=[
'keyy', 'index1', 'index2', 'sub_section1',
'sub_section2', 'dtw_value'])
dtw_df = dtw_df.append(temp_df, ignore_index=True)
return dtw_df
dt_test = dtw_test2()
stop = timeit.default_timer()
print('Time: ', stop - start)
|
<mask token>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import math
import itertools
from dtw import dtw
import timeit
from helper_functions import normalize, alphabetize_ts, hamming_distance
<mask token>
start = timeit.default_timer()
data = pd.read_csv('test_data2.csv', sep=',', header=None)
x1 = data.iloc[1:, 1].values.flatten()
x1 = np.asfarray(x1, float)
y_alphabet_size = 4
word_lenth = 3
window_size = round(len(x1) * 10 / 100)
skip_offset = round(window_size / 2)
ham_distance = 1
epsilon = 1e-06
def segment_ts():
ts_len = len(x1)
mod = ts_len % window_size
rnge = 0
if skip_offset == 0:
ts_len = int((ts_len - mod - window_size) / 1)
rnge = int(ts_len / window_size)
else:
ts_len = int(math.ceil((ts_len - window_size) / skip_offset))
rnge = int(ts_len)
curr_count = 0
words = list()
indices = list()
complete_indices = list()
for i in range(0, rnge):
sub_section = x1[curr_count:curr_count + window_size]
sub_section = normalize(sub_section)
curr_word = ''
chunk_size = int(len(sub_section) / word_lenth)
num = 0
curr_letter = ''
for j in range(0, word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_letter = alphabetize_ts(chunk)
curr_word += str(curr_letter)
complete_indices.append(curr_count)
num += chunk_size
words.append(curr_word)
indices.append(curr_count)
temp_list = []
temp_list.append(sub_section)
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=temp_list)
temp_df.insert(loc=0, column='keys', value=curr_word)
temp_df.insert(loc=0, column='position', value=sorted(sub_section)[
len(sub_section) // 2])
temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))
temp_df.insert(loc=0, column='indices', value=curr_count)
curr_count = curr_count + skip_offset - 1
if i == 0:
df_sax = temp_df.copy()
else:
df_sax = df_sax.append(temp_df, ignore_index=True)
return words, indices, df_sax
alphabetize, indices, df_sax = segment_ts()
<mask token>
def complete_word():
complete_word = list()
complete_indices = indices
""" Simillar Words """
complete_word = alphabetize
sax = defaultdict(list)
for i in range(0, len(complete_word)):
if len(complete_word[i]) == word_lenth:
sax[complete_word[i]].append(complete_indices[i])
return sax
def Compare_Shape():
simillar_word = complete_word()
map_keys = defaultdict(list)
map_indices = defaultdict(list)
for key_i in simillar_word:
temp_list = list()
temp_list.append(simillar_word.get(key_i))
map_keys[key_i].append(key_i)
for key_j in simillar_word:
dist = hamming_distance(key_i, key_j)
if dist == ham_distance and key_i != key_j:
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
else:
map_keys[key_i].append([])
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return map_keys, map_indices
compare_strings, compare_list = Compare_Shape()
def dtw_test2():
df_dtw_prep = df_sax
dtw_df = pd.DataFrame()
for k, v in compare_list.items():
v_temp = str(v)[2:-2]
v1 = [int(s) for s in v_temp.split(',')]
for i in range(0, len(v1) - 1):
for j in range(i, len(v1)):
if v1[i] != v1[j]:
row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]
row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]
sub_section1 = row1.iloc[0]['sub_section']
sub_section2 = row2.iloc[0]['sub_section']
index1 = row1.iloc[0]['indices']
index2 = row2.iloc[0]['indices']
x = np.array(sub_section1).reshape(-1, 1)
y = np.array(sub_section2).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x,
y, dist=euclidean_norm)
temp_df = pd.DataFrame([[k, index1, index2,
sub_section1, sub_section2, dtw_value]], columns=[
'keyy', 'index1', 'index2', 'sub_section1',
'sub_section2', 'dtw_value'])
dtw_df = dtw_df.append(temp_df, ignore_index=True)
return dtw_df
dt_test = dtw_test2()
stop = timeit.default_timer()
print('Time: ', stop - start)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 17:16:12 2019
@author: Meagatron
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
import math
import itertools
from dtw import dtw
import timeit
from helper_functions import normalize,alphabetize_ts,hamming_distance
"""------------- Intialization ------------- """
start = timeit.default_timer()
data = pd.read_csv('test_data2.csv', sep=',', header=None)
x1 = data.iloc[1:,1].values.flatten()
x1=np.asfarray(x1,float)
y_alphabet_size=4
word_lenth=3
window_size=round( len(x1) *10 /100 )
skip_offset=round(window_size/2)
ham_distance=1
epsilon = 1e-6
def segment_ts():
ts_len=len(x1)
mod = ts_len%window_size
rnge=0
if(skip_offset==0):
ts_len=int((ts_len-mod-window_size)/1)
rnge=int(ts_len/window_size)
else:
ts_len=int(math.ceil((ts_len-window_size)/skip_offset))
rnge=int(ts_len)
curr_count=0
words=list()
indices=list()
complete_indices=list()
for i in range(0, rnge):
sub_section = x1[curr_count:(curr_count+window_size)]
sub_section=normalize(sub_section)
curr_word=""
chunk_size=int(len(sub_section)/word_lenth)
num=0
curr_letter=""
for j in range(0,word_lenth):
chunk = sub_section[num:num + chunk_size]
curr_letter=alphabetize_ts(chunk)
curr_word+=str(curr_letter)
complete_indices.append(curr_count)
num+=chunk_size
words.append(curr_word)
indices.append(curr_count)
temp_list=[]
temp_list.append(sub_section)
temp_df = pd.DataFrame()
temp_df.insert(loc=0, column='sub_section', value=temp_list)
temp_df.insert(loc=0, column='keys', value=curr_word)
temp_df.insert(loc=0, column='position', value=sorted(sub_section)[len(sub_section) // 2])
temp_df.insert(loc=0, column='scale_high', value=np.max(sub_section))
temp_df.insert(loc=0, column='scale_low', value=np.min(sub_section))
temp_df.insert(loc=0, column='indices', value=curr_count)
curr_count=curr_count+skip_offset-1
if(i==0):
df_sax =temp_df.copy()
else:
df_sax=df_sax.append(temp_df, ignore_index=True)
return (words,indices,df_sax)
alphabetize,indices,df_sax=segment_ts()
""" Complete Words """
def complete_word():
complete_word=list()
complete_indices=indices
""" Simillar Words """
complete_word=alphabetize
sax = defaultdict(list)
for i in range(0,len(complete_word)):
if(len(complete_word[i])==word_lenth):
sax[complete_word[i]].append(complete_indices[i])
return sax
#alphabetize1,indices1,df_sax=segment_ts()
def Compare_Shape():
simillar_word=complete_word()
map_keys = defaultdict(list)
map_indices=defaultdict(list)
for key_i in simillar_word:
temp_list=list()
temp_list.append(simillar_word.get(key_i))
map_keys[key_i].append(key_i)
for key_j in simillar_word:
dist=hamming_distance(key_i, key_j)
if(dist==ham_distance and key_i !=key_j):
map_keys[key_i].append(key_j)
temp_list.append(simillar_word.get(key_j))
else:
map_keys[key_i].append([])
tempp = list(itertools.chain(*temp_list))
map_indices[key_i].append(tempp)
return (map_keys,map_indices)
compare_strings,compare_list=Compare_Shape()
def dtw_test2():
df_dtw_prep=df_sax
dtw_df=pd.DataFrame()
for k, v in compare_list.items():
v_temp=str(v)[2:-2]
v1=[int(s) for s in v_temp.split(',')]
for i in range(0,len(v1)-1):
for j in range(i,len(v1)):
if(v1[i] != v1[j]):
row1 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[i]]
row2 = df_dtw_prep.loc[df_dtw_prep['indices'] == v1[j]]
sub_section1 = row1.iloc[0]['sub_section']
sub_section2 = row2.iloc[0]['sub_section']
index1 = row1.iloc[0]['indices']
index2 = row2.iloc[0]['indices']
x=np.array(sub_section1).reshape(-1, 1)
y=np.array(sub_section2).reshape(-1, 1)
euclidean_norm = lambda x, y: np.abs(x - y)
dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x, y, dist=euclidean_norm)
temp_df = pd.DataFrame([[k,index1,index2,sub_section1,sub_section2,dtw_value]],
columns=['keyy','index1','index2','sub_section1','sub_section2','dtw_value'])
dtw_df=dtw_df.append(temp_df,ignore_index=True)
return(dtw_df)
dt_test=dtw_test2()
stop = timeit.default_timer()
print('Time: ', stop - start)
|
[
4,
5,
6,
7,
8
] |
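The row above imports `normalize`, `alphabetize_ts`, and `hamming_distance` from a `helper_functions` module that is not part of this dump. Below are plausible minimal implementations, consistent with how the functions are called (per-window z-normalization, one SAX letter per chunk with the row's 4-letter alphabet, positional mismatch count). These are assumptions, not the original module.

import numpy as np

def normalize(section, eps=1e-6):
    # z-normalize a window; guard against flat sections
    section = np.asarray(section, dtype=float)
    std = section.std()
    if std < eps:
        return section - section.mean()
    return (section - section.mean()) / std

def alphabetize_ts(chunk, alphabet='abcd'):
    # map the chunk mean onto one SAX letter via equiprobable N(0,1) breakpoints
    breakpoints = [-0.67, 0.0, 0.67]  # quartile cuts for a 4-letter alphabet
    mean = float(np.mean(chunk))
    for letter, bp in zip(alphabet, breakpoints):
        if mean < bp:
            return letter
    return alphabet[-1]

def hamming_distance(a, b):
    # number of positions at which two equal-length words differ
    return sum(c1 != c2 for c1, c2 in zip(a, b))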
2,150 |
9aee715e976db632f0829a06cb9e0101c90512be
|
<mask token>
|
<mask token>
fout.close()
<mask token>
if not drive:
drive = 'C:'
<mask token>
os.system(runString)
<mask token>
fout.close()
<mask token>
for index, line in enumerate(lines):
panelData.append(np.array(list(map(float, lines[index].split()))))
<mask token>
for index in panelNums:
ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([
False, True, False, False, False])], panelData[panelData[:, 0] ==
index][:, np.array([False, False, True, False, False])], panelData[
panelData[:, 0] == index][:, np.array([False, False, False, True,
False])])
for index in panelNums:
symFlag = panelData[panelData[:, 0] == index][0, np.array([False, False,
False, False, True])]
if symFlag == 0 or symFlag == 2:
ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([
False, True, False, False, False])], -1 * panelData[panelData[:,
0] == index][:, np.array([False, False, True, False, False])],
panelData[panelData[:, 0] == index][:, np.array([False, False,
False, True, False])])
ax.grid()
ax.set(ylabel='y-in', xlabel='x-in', zlabel='z-in', title='')
ax.xaxis.label.set_size(16)
ax.yaxis.label.set_size(16)
ax.zaxis.label.set_size(16)
<mask token>
ax.set_aspect('equal')
<mask token>
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
plt.show()
|
<mask token>
fout = open('path.txt', 'r')
userExePath = fout.readline()
fout.close()
drive, exePath = userExePath.split('\\', 1)
if not drive:
drive = 'C:'
runString = drive + ' && cd \\' + exePath + ' && vorlax.exe'
os.system(runString)
fout = open(drive + '\\' + exePath + '\\VORLAX.WIRE', 'r')
lines = fout.readlines()
fout.close()
panelData = []
for index, line in enumerate(lines):
panelData.append(np.array(list(map(float, lines[index].split()))))
panelData = np.array(panelData)
panelNums = np.unique(panelData[0:, 0:1])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for index in panelNums:
ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([
False, True, False, False, False])], panelData[panelData[:, 0] ==
index][:, np.array([False, False, True, False, False])], panelData[
panelData[:, 0] == index][:, np.array([False, False, False, True,
False])])
for index in panelNums:
symFlag = panelData[panelData[:, 0] == index][0, np.array([False, False,
False, False, True])]
if symFlag == 0 or symFlag == 2:
ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([
False, True, False, False, False])], -1 * panelData[panelData[:,
0] == index][:, np.array([False, False, True, False, False])],
panelData[panelData[:, 0] == index][:, np.array([False, False,
False, True, False])])
ax.grid()
ax.set(ylabel='y-in', xlabel='x-in', zlabel='z-in', title='')
ax.xaxis.label.set_size(16)
ax.yaxis.label.set_size(16)
ax.zaxis.label.set_size(16)
x = panelData[:, 1]
y = panelData[:, 2]
negativey = -1 * panelData[:, 2]
y = np.concatenate((y, negativey), axis=0)
z = panelData[:, 3]
ax.set_aspect('equal')
max_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]
).max() / 2.0
mid_x = (x.max() + x.min()) * 0.5
mid_y = (y.max() + y.min()) * 0.5
mid_z = (z.max() + z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
plt.show()
|
<mask token>
import os
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
fout = open('path.txt', 'r')
userExePath = fout.readline()
fout.close()
drive, exePath = userExePath.split('\\', 1)
if not drive:
drive = 'C:'
runString = drive + ' && cd \\' + exePath + ' && vorlax.exe'
os.system(runString)
fout = open(drive + '\\' + exePath + '\\VORLAX.WIRE', 'r')
lines = fout.readlines()
fout.close()
panelData = []
for index, line in enumerate(lines):
panelData.append(np.array(list(map(float, lines[index].split()))))
panelData = np.array(panelData)
panelNums = np.unique(panelData[0:, 0:1])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for index in panelNums:
ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([
False, True, False, False, False])], panelData[panelData[:, 0] ==
index][:, np.array([False, False, True, False, False])], panelData[
panelData[:, 0] == index][:, np.array([False, False, False, True,
False])])
for index in panelNums:
symFlag = panelData[panelData[:, 0] == index][0, np.array([False, False,
False, False, True])]
if symFlag == 0 or symFlag == 2:
ax.plot_wireframe(panelData[panelData[:, 0] == index][:, np.array([
False, True, False, False, False])], -1 * panelData[panelData[:,
0] == index][:, np.array([False, False, True, False, False])],
panelData[panelData[:, 0] == index][:, np.array([False, False,
False, True, False])])
ax.grid()
ax.set(ylabel='y-in', xlabel='x-in', zlabel='z-in', title='')
ax.xaxis.label.set_size(16)
ax.yaxis.label.set_size(16)
ax.zaxis.label.set_size(16)
x = panelData[:, 1]
y = panelData[:, 2]
negativey = -1 * panelData[:, 2]
y = np.concatenate((y, negativey), axis=0)
z = panelData[:, 3]
ax.set_aspect('equal')
max_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]
).max() / 2.0
mid_x = (x.max() + x.min()) * 0.5
mid_y = (y.max() + y.min()) * 0.5
mid_z = (z.max() + z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
plt.show()
|
# -*- coding: utf-8 -*-
"""
VorRun
Runs Vorlax and plots wireframe output from Vorlax
(https://github.com/GalaxyHobo/VORLAX)
NOTE! Type: "%matplotlib auto" in iPython console to
switch to interactive plots, or "%matplotlib inline"
to switch to inline, in the console.
NOTE! Reads path to Vorlax .exe in "path.txt" file that resides in
same directory as vorRun.py. The path in that file must be on the
first line and begin with drive letter + colon, or "\". Assumes
C-drive if path begins with "\".
Lance Bays
"""
import os
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# Establish the working directory that contains the exe:
# read the path from "path.txt" (first line, drive letter + colon or "\")
fout = open("path.txt", 'r')
userExePath=fout.readline()
fout.close()
# Split drive Letter from path
drive, exePath = userExePath.split("\\", 1)
# Handle case where user doesn't include drive in path —
# we will assume it's on the C drive.
if not drive: drive="C:"
# Run program
# Command-line instructions to change drive & directory, and run program
runString = drive + " && cd \\" + exePath + " && vorlax.exe"
os.system( runString)
# Read output file
fout = open(drive + "\\" + exePath + "\\VORLAX.WIRE", 'r')
lines=fout.readlines()
fout.close()
# Convert to numpy array
panelData=[]
for index, line in enumerate(lines):
panelData.append(np.array(list(map(float,lines[index].split()))))
panelData=np.array(panelData)
# Determine array of unique panel ID's
panelNums = np.unique(panelData[0:,0:1])
# Add subplot
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Plot the Vorlax wireframe (one side)
for index in panelNums:
ax.plot_wireframe(
panelData[panelData[:,0]==index][:,np.array([False,True,False,False,False])],
panelData[panelData[:,0]==index][:,np.array([False,False,True,False,False])],
panelData[panelData[:,0]==index][:,np.array([False,False,False,True,False])])
# Plot the mirror image (if symmetry is indicated in wire file)
for index in panelNums:
symFlag=panelData[panelData[:,0]==index][0,np.array([False,False,False,False,True])]
if symFlag==0 or symFlag==2:
ax.plot_wireframe(
panelData[panelData[:,0]==index][:,np.array([False,True,False,False,False])],
-1*panelData[panelData[:,0]==index][:,np.array([False,False,True,False,False])],
panelData[panelData[:,0]==index][:,np.array([False,False,False,True,False])])
# Format plot
ax.grid()
ax.set(ylabel='y-in',
xlabel='x-in',
zlabel='z-in',
title='')
ax.xaxis.label.set_size(16)
ax.yaxis.label.set_size(16)
ax.zaxis.label.set_size(16)
# Create super-set of data to establish ranges
x=panelData[:,1]
y=panelData[:,2]
negativey = -1 * panelData[:,2]
y=np.concatenate((y, negativey), axis=0)
z=panelData[:,3]
# Set equal scales on axes
ax.set_aspect('equal')
# Set ranges for plot
max_range = np.array([x.max() - x.min(),
y.max() - y.min(),
z.max() - z.min()]).max() / 2.0
# Compute midpoints in each direction
mid_x = (x.max() + x.min()) * 0.5
mid_y = (y.max() + y.min()) * 0.5
mid_z = (z.max() + z.min()) * 0.5
# Set final ranges
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
plt.show()
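A side note on the axis-scaling block above: depending on the matplotlib version, Axes3D.set_aspect('equal') is either a no-op or raises NotImplementedError (native 3D support only arrived around matplotlib 3.6), which is why the script builds a cubic bounding box by hand. A minimal standalone sketch of that same workaround, assuming only numpy and matplotlib:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d  # registers the '3d' projection on older matplotlib
def set_equal_3d(ax, x, y, z):
    # Force a cubic bounding box so one unit looks the same on every axis
    half = max(x.max() - x.min(), y.max() - y.min(), z.max() - z.min()) / 2.0
    mid_x = (x.max() + x.min()) / 2.0
    mid_y = (y.max() + y.min()) / 2.0
    mid_z = (z.max() + z.min()) / 2.0
    ax.set_xlim(mid_x - half, mid_x + half)
    ax.set_ylim(mid_y - half, mid_y + half)
    ax.set_zlim(mid_z - half, mid_z + half)
t = np.linspace(0, 4 * np.pi, 200)
x, y, z = np.cos(t), np.sin(t), 0.1 * t  # a helix: visibly squashed in z without the fix
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(x, y, z)
set_equal_3d(ax, x, y, z)
plt.show()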
|
[
0,
1,
2,
3,
4
] |
2,151 |
aa15f684d23d97a45a416b1fdcfb192710ebb56f
|
<mask token>
|
<mask token>
if (sum(prices) - prices[k]) // 2 == taken:
print('Bon Appetit')
else:
print(taken - (sum(prices) - prices[k]) // 2)
|
n, k = map(int, input().split())
prices = [int(temp) for temp in input().split()]
taken = int(input())
if (sum(prices) - prices[k]) // 2 == taken:
print('Bon Appetit')
else:
print(taken - (sum(prices) - prices[k]) // 2)
|
# https://www.hackerrank.com/challenges/bon-appetit
n, k = map(int, input().split())
prices = [int(temp) for temp in input().split()]
taken = int(input())
if (sum(prices) - prices[k]) // 2 == taken:
print("Bon Appetit")
else:
print(taken - (sum(prices) - prices[k])// 2)
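The arithmetic is easy to check by hand on the problem's published sample (recalled here, so treat the numbers as an assumption): prices 3 10 2 9, Anna skipped item k=1, and Brian charged her 12. A tiny sketch:
def refund(prices, k, charged):
    # Anna's fair share is half of the bill without item k
    fair = (sum(prices) - prices[k]) // 2
    return 'Bon Appetit' if charged == fair else charged - fair
assert refund([3, 10, 2, 9], 1, 12) == 5             # (24 - 10) // 2 = 7, so she is owed 5
assert refund([3, 10, 2, 9], 1, 7) == 'Bon Appetit'  # charged exactly her fair share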
| null |
[
0,
1,
2,
3
] |
2,152 |
3553fa72cb831f82a1030b9eadc9594eee1d1422
|
<mask token>
class Guest:
<mask token>
def parked_and_linkedplatform_value(self):
boolean, linkedplatform = (self.CarRotationManager.
check_if_guest_parked(self))
if boolean == True:
self.parked = True
self.linkedplatform = linkedplatform
else:
self.parked = False
self.linkedplatform = None
<mask token>
def park_car(self):
self.parked_and_linkedplatform_value()
if self.parked == True:
print('Your car is already parked!\n')
return
platform = self.CarRotationManager.return_empty_platform()
if platform == None:
return -1
self.CarRotationManager.return_platform_to_base(platform.Position)
platform.link(self)
self.linkedplatform = platform
self.parked = True
self.CarRotationManager.occupiedPlatforms = (self.
CarRotationManager.occupiedPlatforms + 1)
print('Your ' + self.Car.model + ' has been parked!\n')
now = datetime.now()
array = str(now).split()
string_into_file = array[0] + '@' + array[1]
self.controlboard.add_guest_to_file(self, string_into_file)
self.Start = string_into_file
|
<mask token>
class Guest:
<mask token>
def parked_and_linkedplatform_value(self):
boolean, linkedplatform = (self.CarRotationManager.
check_if_guest_parked(self))
if boolean == True:
self.parked = True
self.linkedplatform = linkedplatform
else:
self.parked = False
self.linkedplatform = None
def request_car(self):
self.parked_and_linkedplatform_value()
if self.parked == False:
print('Your car is not parked!\n')
return
pos = self.CarRotationManager.get_platform_position(self)
if pos == -1:
print('Your car is not parked!\n')
return
self.CarRotationManager.return_platform_to_base(pos)
self.CarRotationManager.release_car(self.linkedplatform)
self.parked = False
self.CarRotationManager.occupiedPlatforms = (self.
CarRotationManager.occupiedPlatforms - 1)
print('Your ' + self.Car.model + ' has been released.')
print('Have a great day ' + self.Name + '!\n')
self.controlboard.remove_guest_from_file(self)
def park_car(self):
self.parked_and_linkedplatform_value()
if self.parked == True:
print('Your car is already parked!\n')
return
platform = self.CarRotationManager.return_empty_platform()
if platform == None:
return -1
self.CarRotationManager.return_platform_to_base(platform.Position)
platform.link(self)
self.linkedplatform = platform
self.parked = True
self.CarRotationManager.occupiedPlatforms = (self.
CarRotationManager.occupiedPlatforms + 1)
print('Your ' + self.Car.model + ' has been parked!\n')
now = datetime.now()
array = str(now).split()
string_into_file = array[0] + '@' + array[1]
self.controlboard.add_guest_to_file(self, string_into_file)
self.Start = string_into_file
|
<mask token>
class Guest:
def __init__(self, Name, FamilyName, Car, controlboard,
CarRotationManager, ID=0, linkedplatform=None, Start=0):
self.Name = Name
self.FamilyName = FamilyName
self.Car = Car
self.controlboard = controlboard
self.CarRotationManager = CarRotationManager
if ID == 0:
self.uniqueID = controlboard.set_id()
else:
self.uniqueID = ID
self.parked = False
self.linkedplatform = None
self.Start = Start
def parked_and_linkedplatform_value(self):
boolean, linkedplatform = (self.CarRotationManager.
check_if_guest_parked(self))
if boolean == True:
self.parked = True
self.linkedplatform = linkedplatform
else:
self.parked = False
self.linkedplatform = None
def request_car(self):
self.parked_and_linkedplatform_value()
if self.parked == False:
print('Your car is not parked!\n')
return
pos = self.CarRotationManager.get_platform_position(self)
if pos == -1:
print('Your car is not parked!\n')
return
self.CarRotationManager.return_platform_to_base(pos)
self.CarRotationManager.release_car(self.linkedplatform)
self.parked = False
self.CarRotationManager.occupiedPlatforms = (self.
CarRotationManager.occupiedPlatforms - 1)
print('Your ' + self.Car.model + ' has been released.')
print('Have a great day ' + self.Name + '!\n')
self.controlboard.remove_guest_from_file(self)
def park_car(self):
self.parked_and_linkedplatform_value()
if self.parked == True:
print('Your car is already parked!\n')
return
platform = self.CarRotationManager.return_empty_platform()
if platform == None:
return -1
self.CarRotationManager.return_platform_to_base(platform.Position)
platform.link(self)
self.linkedplatform = platform
self.parked = True
self.CarRotationManager.occupiedPlatforms = (self.
CarRotationManager.occupiedPlatforms + 1)
print('Your ' + self.Car.model + ' has been parked!\n')
now = datetime.now()
array = str(now).split()
string_into_file = array[0] + '@' + array[1]
self.controlboard.add_guest_to_file(self, string_into_file)
self.Start = string_into_file
|
from datetime import datetime
class Guest:
def __init__(self, Name, FamilyName, Car, controlboard,
CarRotationManager, ID=0, linkedplatform=None, Start=0):
self.Name = Name
self.FamilyName = FamilyName
self.Car = Car
self.controlboard = controlboard
self.CarRotationManager = CarRotationManager
if ID == 0:
self.uniqueID = controlboard.set_id()
else:
self.uniqueID = ID
self.parked = False
self.linkedplatform = None
self.Start = Start
def parked_and_linkedplatform_value(self):
boolean, linkedplatform = (self.CarRotationManager.
check_if_guest_parked(self))
if boolean == True:
self.parked = True
self.linkedplatform = linkedplatform
else:
self.parked = False
self.linkedplatform = None
def request_car(self):
self.parked_and_linkedplatform_value()
if self.parked == False:
print('Your car is not parked!\n')
return
pos = self.CarRotationManager.get_platform_position(self)
if pos == -1:
print('Your car is not parked!\n')
return
self.CarRotationManager.return_platform_to_base(pos)
self.CarRotationManager.release_car(self.linkedplatform)
self.parked = False
self.CarRotationManager.occupiedPlatforms = (self.
CarRotationManager.occupiedPlatforms - 1)
print('Your ' + self.Car.model + ' has been released.')
print('Have a great day ' + self.Name + '!\n')
self.controlboard.remove_guest_from_file(self)
def park_car(self):
self.parked_and_linkedplatform_value()
if self.parked == True:
print('Your car is already parked!\n')
return
platform = self.CarRotationManager.return_empty_platform()
if platform == None:
return -1
self.CarRotationManager.return_platform_to_base(platform.Position)
platform.link(self)
self.linkedplatform = platform
self.parked = True
self.CarRotationManager.occupiedPlatforms = (self.
CarRotationManager.occupiedPlatforms + 1)
print('Your ' + self.Car.model + ' has been parked!\n')
now = datetime.now()
array = str(now).split()
string_into_file = array[0] + '@' + array[1]
self.controlboard.add_guest_to_file(self, string_into_file)
self.Start = string_into_file
|
from datetime import datetime
class Guest:
def __init__(self, Name, FamilyName, Car, controlboard,
CarRotationManager, ID=0, linkedplatform=None,Start=0): # --Initializing Guest credentials/info---
self.Name = Name
self.FamilyName = FamilyName
self.Car = Car
self.controlboard = controlboard
self.CarRotationManager = CarRotationManager
if ID == 0: # In this case, the guest would be a new guest, so when we register him as a guest we don't give him an ID, and we ask the controlboard to generate the ID
self.uniqueID = controlboard.set_id() # ----calling controlboard class to set ID---unique ID given by control board/decision engine
else: # In this case, the guest would have already parked before and he would already have an ID, so instead of generating a new ID we just give him his old one
self.uniqueID = ID
self.parked = False # Boolean variable which indicates if guest is parked or not
self.linkedplatform = None # Variable containing the platform where the guest's car is parked
self.Start=Start # This is the time when the guest parks
def parked_and_linkedplatform_value(self): # This function checks if the guest is parked and sets the values of linkedplatform and parked accordingly
(boolean, linkedplatform) = self.CarRotationManager.check_if_guest_parked(self)
if boolean == True:
self.parked = True
self.linkedplatform = linkedplatform
else:
self.parked = False
self.linkedplatform = None
def request_car(self): # Function that releases the car if it is parked
self.parked_and_linkedplatform_value()
if self.parked == False:
print("Your car is not parked!\n")
return
pos = self.CarRotationManager.get_platform_position(self) # Get the car's current position in the parking
if (pos == -1):
print("Your car is not parked!\n")
return
self.CarRotationManager.return_platform_to_base(pos) # Move the car to the base position
self.CarRotationManager.release_car(self.linkedplatform) # Release the car
self.parked = False
self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms - 1
print("Your " + self.Car.model + " has been released.")
print("Have a great day " + self.Name + "!\n")
self.controlboard.remove_guest_from_file(self) # We remove the guest from the file once his car is not parked anymore
def park_car(self): # Function that parks the guest's car if it's not already parked
self.parked_and_linkedplatform_value()
if (self.parked == True):
print("Your car is already parked!\n")
return
platform = self.CarRotationManager.return_empty_platform() # FOUND CLOSEST EMPTY PLATFORM
if (platform == None):
return -1 # PARKING IS FULL
self.CarRotationManager.return_platform_to_base(platform.Position)
platform.link(self) # NOW USER'S CAR IS PARKED ON BASE PLATFORM
self.linkedplatform = platform
self.parked = True
self.CarRotationManager.occupiedPlatforms = self.CarRotationManager.occupiedPlatforms + 1
print("Your " + self.Car.model + " has been parked!\n")
now = datetime.now() # Get the current time, i.e when the user parks his car
array = str(now).split()
string_into_file = array[0] + "@" + array[1]
self.controlboard.add_guest_to_file(self,string_into_file) # Add the current time (when the user parked) next to his information in the guest file
self.Start=string_into_file
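Guest only talks to its collaborators through a handful of methods, so the park/request round trip can be exercised with throwaway stand-ins. The sketch below reuses the Guest class above; StubPlatform, StubRotationManager, StubBoard, and StubCar are invented here purely for illustration and are not the project's real classes:
class StubPlatform:
    def __init__(self, position):
        self.Position = position
        self.guest = None
    def link(self, guest):  # park the guest's car on this platform
        self.guest = guest
class StubRotationManager:
    def __init__(self):
        self.platform = StubPlatform(position=0)  # a one-platform "carousel"
        self.occupiedPlatforms = 0
    def check_if_guest_parked(self, guest):
        if self.platform.guest is guest:
            return True, self.platform
        return False, None
    def return_empty_platform(self):
        return None if self.platform.guest else self.platform
    def return_platform_to_base(self, pos):
        pass  # nothing to rotate in the stub
    def release_car(self, platform):
        platform.guest = None
    def get_platform_position(self, guest):
        return self.platform.Position if self.platform.guest is guest else -1
class StubBoard:
    def set_id(self):
        return 1
    def add_guest_to_file(self, guest, when):
        pass
    def remove_guest_from_file(self, guest):
        pass
class StubCar:
    model = 'Corolla'
g = Guest('Ada', 'Lovelace', StubCar(), StubBoard(), StubRotationManager())
g.park_car()     # prints: Your Corolla has been parked!
g.request_car()  # prints: Your Corolla has been released. / Have a great day Ada!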
|
[
3,
4,
5,
6,
7
] |
2,153 |
879f7503f7f427f92109024b4646d1dc7f15d63d
|
<mask token>
|
<mask token>
print('YES', 'NO')[max(mat.count(str(i)) for i in xrange(1, 10)) > K * 2]
|
K = input()
mat = ''.join(raw_input() for i in xrange(4))
print('YES', 'NO')[max(mat.count(str(i)) for i in xrange(1, 10)) > K * 2]
|
K = input()
mat = "".join(raw_input() for i in xrange(4))
print ("YES", "NO")[max(mat.count(str(i)) for i in xrange(1, 10)) > K*2]
| null |
[
0,
1,
2,
3
] |
2,154 |
076e10b3741542b7137f6ac517dba482f545b123
|
<mask token>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
<mask token>
|
<mask token>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
def shot_percentage():
shotm = eval(input('enter the shots made: '))
shott = eval(input('enter the total shots: '))
shotper = shotm / shott
print('Shot percentage = ', shotper)
<mask token>
|
<mask token>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
def shot_percentage():
shotm = eval(input('enter the shots made: '))
shott = eval(input('enter the total shots: '))
shotper = shotm / shott
print('Shot percentage = ', shotper)
def coffee():
pound = eval(input('enter the amount of pounds purchased: '))
cost = pound * 10.5 + pound * 0.86 + 1.5
    print('The total cost of coffee is', cost)
<mask token>
|
<mask token>
def calc_rec_area():
length = eval(input('Enter the length: '))
width = eval(input('Enter the width: '))
area = length * width
print('Area =', area)
def calc_rec_vol():
lengthh = eval(input('Enter the length: '))
widthh = eval(input('Enter the width: '))
heighth = eval(input('Enter the height: '))
volume = lengthh * widthh * heighth
print('Volume =', volume)
def shot_percentage():
shotm = eval(input('enter the shots made: '))
shott = eval(input('enter the total shots: '))
shotper = shotm / shott
print('Shot percentage = ', shotper)
def coffee():
pound = eval(input('enter the amount of pounds purchased: '))
cost = pound * 10.5 + pound * 0.86 + 1.5
    print('The total cost of coffee is', cost)
def miles_to_kilometers():
    """1 mile = 1.61 kilometers"""
    miles = eval(input('enter the amount of miles driven: '))
    driven = miles * 1.61
    print('The number of kilometers driven is: ', driven)
|
"""
Name: Thomas Scola
lab1.py
Problem: This function calculates the area of a rectangle
"""
'''def calc_area():'''
def calc_rec_area():
length = eval(input("Enter the length: "))
width = eval(input("Enter the width: "))
area = length * width
print("Area =", area)
def calc_rec_vol():
lengthh = eval(input("Enter the length: "))
widthh = eval(input("Enter the width: "))
heighth = eval(input("Enter the height: "))
volume = lengthh * widthh * heighth
print("Volume =", volume)
def shot_percentage():
shotm = eval(input("enter the shots made: "))
shott = eval(input("enter the total shots: "))
shotper = shotm / shott
print("Shot percentage = ", shotper)
def coffee():
pound = eval(input("enter the amount of pounds purchased: "))
cost = (pound * 10.50) + (pound * 0.86) + 1.50
print("The total cost of coffee are", cost)
def kilometers_to_miles():
"""1 mile = 1.61 kilometers"""
miles = eval(input("enter the amount of miles driven: "))
driven = miles * 1.61
print("The amount of kilometers driven are: ", driven)
|
[
2,
3,
4,
5,
6
] |
2,155 |
0e3bf0ddd654b92b2cd962a2f3935c639eeb0695
|
<mask token>
|
<mask token>
def bfs(start_nodes, g):
dq = deque()
dq.extend(start_nodes)
for i, j in start_nodes:
g[i][j] = -1
while dq:
y, x = dq.popleft()
for k in range(4):
b, a = dy[k] + y, dx[k] + x
if 0 <= b < n and 0 <= a < n and g[b][a] == 0:
g[b][a] = g[y][x] - 1
dq.append((b, a))
mm = 25000
for i in range(n):
for j in range(n):
if g[i][j] == 0:
return -1
mm = min(g[i][j], mm)
return -mm - 1
<mask token>
|
<mask token>
for i in range(n):
for j in range(n):
if graph[i][j] == 2:
graph[i][j] = 0
virus_lst.append((i, j))
<mask token>
def bfs(start_nodes, g):
dq = deque()
dq.extend(start_nodes)
for i, j in start_nodes:
g[i][j] = -1
while dq:
y, x = dq.popleft()
for k in range(4):
b, a = dy[k] + y, dx[k] + x
if 0 <= b < n and 0 <= a < n and g[b][a] == 0:
g[b][a] = g[y][x] - 1
dq.append((b, a))
mm = 25000
for i in range(n):
for j in range(n):
if g[i][j] == 0:
return -1
mm = min(g[i][j], mm)
return -mm - 1
<mask token>
for comb in combs:
result.append(bfs(comb, deepcopy(graph)))
<mask token>
for r in result:
if r != -1:
time = min(time, r)
flag = True
print(time if flag else -1)
|
<mask token>
input = sys.stdin.readline
<mask token>
n, m = map(int, input().split())
graph = [list(map(int, input().split())) for i in range(n)]
virus_lst = []
for i in range(n):
for j in range(n):
if graph[i][j] == 2:
graph[i][j] = 0
virus_lst.append((i, j))
combs = combinations(virus_lst, m)
dy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]
def bfs(start_nodes, g):
dq = deque()
dq.extend(start_nodes)
for i, j in start_nodes:
g[i][j] = -1
while dq:
y, x = dq.popleft()
for k in range(4):
b, a = dy[k] + y, dx[k] + x
if 0 <= b < n and 0 <= a < n and g[b][a] == 0:
g[b][a] = g[y][x] - 1
dq.append((b, a))
mm = 25000
for i in range(n):
for j in range(n):
if g[i][j] == 0:
return -1
mm = min(g[i][j], mm)
return -mm - 1
result = []
for comb in combs:
result.append(bfs(comb, deepcopy(graph)))
flag = False
time = 25000
for r in result:
if r != -1:
time = min(time, r)
flag = True
print(time if flag else -1)
|
import sys; input = sys.stdin.readline
from collections import deque
from itertools import combinations
from copy import deepcopy
n, m = map(int, input().split())
graph = [list(map(int,input().split())) for i in range(n)]
virus_lst = []
for i in range(n):
for j in range(n):
if graph[i][j]==2:
graph[i][j] = 0
virus_lst.append((i, j))
combs = combinations(virus_lst, m)
dy, dx = [-1, 1, 0, 0], [0, 0, -1, 1]
def bfs(start_nodes, g):
dq = deque()
dq.extend(start_nodes)
for i, j in start_nodes:
g[i][j] = -1
while dq:
y, x = dq.popleft()
for k in range(4):
b, a = dy[k]+y, dx[k]+x
if 0<=b<n and 0<=a<n and g[b][a]==0:
g[b][a] = g[y][x] - 1
dq.append((b,a))
mm = 25000
for i in range(n):
for j in range(n):
if g[i][j]==0:
return -1
mm = min(g[i][j], mm)
return -mm-1
result = []
for comb in combs:
result.append(bfs(comb, deepcopy(graph)))
flag = False
time = 25000
for r in result:
if r!=-1:
time = min(time, r)
flag = True
print(time if flag else -1)
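A performance note on the loop above: deepcopy(graph) rebuilds the whole n-by-n grid for every combination, and that copying usually dominates the run time. One common alternative, sketched below assuming n, graph, dy, dx, virus_lst, and m from the script are in scope (empty_cells, dist, stamp, and bfs_stamped are names introduced here), marks cells with a per-run generation stamp so nothing needs copying:
from collections import deque
empty_cells = sum(row.count(0) for row in graph)  # cells that must get infected
dist = [[0] * n for _ in range(n)]
stamp = [[-1] * n for _ in range(n)]              # generation marker per cell
def bfs_stamped(start_nodes, gen):
    dq = deque(start_nodes)
    for i, j in start_nodes:
        dist[i][j] = 0
        stamp[i][j] = gen
    remaining = empty_cells - len(start_nodes)
    spread = 0                                    # latest infection time seen
    while dq:
        y, x = dq.popleft()
        for k in range(4):
            b, a = dy[k] + y, dx[k] + x
            if 0 <= b < n and 0 <= a < n and graph[b][a] == 0 and stamp[b][a] != gen:
                stamp[b][a] = gen
                dist[b][a] = dist[y][x] + 1
                spread = max(spread, dist[b][a])
                remaining -= 1
                dq.append((b, a))
    return spread if remaining == 0 else -1
# Drop-in replacement for the result loop above:
# result = [bfs_stamped(comb, gen)
#           for gen, comb in enumerate(combinations(virus_lst, m))]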
|
[
0,
1,
2,
3,
5
] |
2,156 |
ef57f0dfea261f022ced36ef9e27a07d63c21026
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = [('eCom', '0014_auto_20210617_1503')]
operations = [migrations.RemoveField(model_name='order', name='items'),
migrations.AddField(model_name='order', name='items', field=models.
ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
to='eCom.orderitem'))]
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('eCom', '0014_auto_20210617_1503')]
operations = [migrations.RemoveField(model_name='order', name='items'),
migrations.AddField(model_name='order', name='items', field=models.
ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,
to='eCom.orderitem'))]
|
# Generated by Django 3.2.4 on 2021-06-18 01:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('eCom', '0014_auto_20210617_1503'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='items',
),
migrations.AddField(
model_name='order',
name='items',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='eCom.orderitem'),
),
]
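The RemoveField/AddField pair is what makemigrations emits when a field changes to an incompatible type, most plausibly a ManyToManyField turning into a ForeignKey here. A hypothetical eCom/models.py after the change (the "before" line and the OrderItem field are assumptions for illustration, not the project's real models):
from django.db import models
class OrderItem(models.Model):
    quantity = models.PositiveIntegerField(default=1)  # illustrative field only
class Order(models.Model):
    # before (assumed): items = models.ManyToManyField(OrderItem)
    items = models.ForeignKey(OrderItem, null=True, on_delete=models.CASCADE)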
|
[
0,
1,
2,
3,
4
] |
2,157 |
43b9d308bb8d2b38c5f539e8700f5c2d8fe2287d
|
<mask token>
def simplify_string(inp):
inp = inp.lower().strip()
inp = re.sub('[^A-Za-z0-9]', '_', inp)
return inp
def makeDirectory(path):
print('creating directory ' + path)
try:
os.mkdir(path)
except FileExistsError:
pass
def initialize(url, browser=None):
if browser == None:
print('creating browser for the first and last time')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)
browser.implicitly_wait(3)
browser.get(url)
browser.implicitly_wait(3)
return browser
<mask token>
def getCastInfo(page_soup):
cast_table = page_soup.find('table', {'class': 'cast_list'})
cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}
) + cast_table.findAll('tr', {'class': 'even'})
cast_and_character = []
for cast_elem in cast_elem_arr:
td_arr = cast_elem.findAll('td')
if len(td_arr) < 4:
continue
actor_elem = td_arr[1]
actor_anchor = actor_elem.find('a')
actor_url, actor_name = processPageAnchor(actor_anchor)
actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}
character_elem = td_arr[3]
character_info = []
character_anchor_arr = character_elem.findAll('a')
for character_anchor in character_anchor_arr:
character_url, character_name = processPageAnchor(character_anchor)
character_info.append({'url': character_url, 'name':
character_name})
cast_and_character.append({'actor': actor_info,
'character_and_episodes': character_info})
return cast_and_character
def checkvalidtext(txt):
if txt.isspace():
return False
arr = ['|', 'See more', '»', ',']
if txt in arr:
return False
if txt.strip() in arr:
return False
return True
def filter(arr):
ret = []
attr = '#'
for val in arr:
if checkvalidtext(val) == False:
continue
if val[-1] == ':':
attr = val[0:-1]
continue
ret.append(val.strip())
return attr, ret
def parseDetailInfo(page_soup):
detail_elem = page_soup.find('div', {'class': 'article', 'id':
'titleDetails'})
divs = detail_elem.findAll('div')
details = {}
for div in divs:
vrr = div.findAll()
attr, value = filter(div.findAll(text=True))
if attr == 'Official Sites' or attr == '#' or attr == 'Color':
continue
details[attr] = value
return details
<mask token>
|
<mask token>
def simplify_string(inp):
inp = inp.lower().strip()
inp = re.sub('[^A-Za-z0-9]', '_', inp)
return inp
def makeDirectory(path):
print('creating directory ' + path)
try:
os.mkdir(path)
except FileExistsError:
pass
def initialize(url, browser=None):
if browser == None:
print('creating browser for the first and last time')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)
browser.implicitly_wait(3)
browser.get(url)
browser.implicitly_wait(3)
return browser
<mask token>
def getSoupFromElement(element):
html = element.get_attribute('innerHTML')
soup = BeautifulSoup(html, 'html.parser')
return soup
def processPageAnchor(anchorElem):
url = anchorElem['href']
text = anchorElem.find(text=True).strip()
return url, text
def getCastInfo(page_soup):
cast_table = page_soup.find('table', {'class': 'cast_list'})
cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}
) + cast_table.findAll('tr', {'class': 'even'})
cast_and_character = []
for cast_elem in cast_elem_arr:
td_arr = cast_elem.findAll('td')
if len(td_arr) < 4:
continue
actor_elem = td_arr[1]
actor_anchor = actor_elem.find('a')
actor_url, actor_name = processPageAnchor(actor_anchor)
actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}
character_elem = td_arr[3]
character_info = []
character_anchor_arr = character_elem.findAll('a')
for character_anchor in character_anchor_arr:
character_url, character_name = processPageAnchor(character_anchor)
character_info.append({'url': character_url, 'name':
character_name})
cast_and_character.append({'actor': actor_info,
'character_and_episodes': character_info})
return cast_and_character
def checkvalidtext(txt):
if txt.isspace():
return False
arr = ['|', 'See more', '»', ',']
if txt in arr:
return False
if txt.strip() in arr:
return False
return True
def filter(arr):
ret = []
attr = '#'
for val in arr:
if checkvalidtext(val) == False:
continue
if val[-1] == ':':
attr = val[0:-1]
continue
ret.append(val.strip())
return attr, ret
def parseDetailInfo(page_soup):
detail_elem = page_soup.find('div', {'class': 'article', 'id':
'titleDetails'})
divs = detail_elem.findAll('div')
details = {}
for div in divs:
vrr = div.findAll()
attr, value = filter(div.findAll(text=True))
if attr == 'Official Sites' or attr == '#' or attr == 'Color':
continue
details[attr] = value
return details
<mask token>
|
<mask token>
def simplify_string(inp):
inp = inp.lower().strip()
inp = re.sub('[^A-Za-z0-9]', '_', inp)
return inp
def makeDirectory(path):
print('creating directory ' + path)
try:
os.mkdir(path)
except FileExistsError:
pass
def initialize(url, browser=None):
if browser == None:
print('creating browser for the first and last time')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)
browser.implicitly_wait(3)
browser.get(url)
browser.implicitly_wait(3)
return browser
def performClick(driver, element):
driver.execute_script('arguments[0].click();', element)
def getSoupFromElement(element):
html = element.get_attribute('innerHTML')
soup = BeautifulSoup(html, 'html.parser')
return soup
def processPageAnchor(anchorElem):
url = anchorElem['href']
text = anchorElem.find(text=True).strip()
return url, text
def getCastInfo(page_soup):
cast_table = page_soup.find('table', {'class': 'cast_list'})
cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}
) + cast_table.findAll('tr', {'class': 'even'})
cast_and_character = []
for cast_elem in cast_elem_arr:
td_arr = cast_elem.findAll('td')
if len(td_arr) < 4:
continue
actor_elem = td_arr[1]
actor_anchor = actor_elem.find('a')
actor_url, actor_name = processPageAnchor(actor_anchor)
actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}
character_elem = td_arr[3]
character_info = []
character_anchor_arr = character_elem.findAll('a')
for character_anchor in character_anchor_arr:
character_url, character_name = processPageAnchor(character_anchor)
character_info.append({'url': character_url, 'name':
character_name})
cast_and_character.append({'actor': actor_info,
'character_and_episodes': character_info})
return cast_and_character
def checkvalidtext(txt):
if txt.isspace():
return False
arr = ['|', 'See more', '»', ',']
if txt in arr:
return False
if txt.strip() in arr:
return False
return True
def filter(arr):
ret = []
attr = '#'
for val in arr:
if checkvalidtext(val) == False:
continue
if val[-1] == ':':
attr = val[0:-1]
continue
ret.append(val.strip())
return attr, ret
def parseDetailInfo(page_soup):
detail_elem = page_soup.find('div', {'class': 'article', 'id':
'titleDetails'})
divs = detail_elem.findAll('div')
details = {}
for div in divs:
vrr = div.findAll()
attr, value = filter(div.findAll(text=True))
if attr == 'Official Sites' or attr == '#' or attr == 'Color':
continue
details[attr] = value
return details
<mask token>
|
<mask token>
def simplify_string(inp):
inp = inp.lower().strip()
inp = re.sub('[^A-Za-z0-9]', '_', inp)
return inp
def makeDirectory(path):
print('creating directory ' + path)
try:
os.mkdir(path)
except FileExistsError:
pass
def initialize(url, browser=None):
if browser == None:
print('creating browser for the first and last time')
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)
browser.implicitly_wait(3)
browser.get(url)
browser.implicitly_wait(3)
return browser
def performClick(driver, element):
driver.execute_script('arguments[0].click();', element)
def getSoupFromElement(element):
html = element.get_attribute('innerHTML')
soup = BeautifulSoup(html, 'html.parser')
return soup
def processPageAnchor(anchorElem):
url = anchorElem['href']
text = anchorElem.find(text=True).strip()
return url, text
def getCastInfo(page_soup):
cast_table = page_soup.find('table', {'class': 'cast_list'})
cast_elem_arr = cast_table.findAll('tr', {'class': 'odd'}
) + cast_table.findAll('tr', {'class': 'even'})
cast_and_character = []
for cast_elem in cast_elem_arr:
td_arr = cast_elem.findAll('td')
if len(td_arr) < 4:
continue
actor_elem = td_arr[1]
actor_anchor = actor_elem.find('a')
actor_url, actor_name = processPageAnchor(actor_anchor)
actor_info = {'@type': 'Person', 'url': actor_url, 'name': actor_name}
character_elem = td_arr[3]
character_info = []
character_anchor_arr = character_elem.findAll('a')
for character_anchor in character_anchor_arr:
character_url, character_name = processPageAnchor(character_anchor)
character_info.append({'url': character_url, 'name':
character_name})
cast_and_character.append({'actor': actor_info,
'character_and_episodes': character_info})
return cast_and_character
def checkvalidtext(txt):
if txt.isspace():
return False
arr = ['|', 'See more', '»', ',']
if txt in arr:
return False
if txt.strip() in arr:
return False
return True
def filter(arr):
ret = []
attr = '#'
for val in arr:
if checkvalidtext(val) == False:
continue
if val[-1] == ':':
attr = val[0:-1]
continue
ret.append(val.strip())
return attr, ret
def parseDetailInfo(page_soup):
detail_elem = page_soup.find('div', {'class': 'article', 'id':
'titleDetails'})
divs = detail_elem.findAll('div')
details = {}
for div in divs:
vrr = div.findAll()
attr, value = filter(div.findAll(text=True))
if attr == 'Official Sites' or attr == '#' or attr == 'Color':
continue
details[attr] = value
return details
def processOneMovie(movie_url, folder_path, driver, try_cnt=0):
try:
if try_cnt == 0:
driver = initialize(movie_url, driver)
page_html = driver.page_source
page_soup = BeautifulSoup(page_html, 'html.parser')
query_result = page_soup.find('script', {'type': 'application/ld+json'}
)
meta_data = json.loads(query_result.string)
try:
meta_data['cast_and_character'] = getCastInfo(page_soup)
except:
meta_data['cast_and_character'
] = 'Error loading cast information -- checked {}'.format(
datetime.datetime.now())
meta_data['details'] = parseDetailInfo(page_soup)
movie_id = meta_data['url'].split('/')[-2]
movie_name = meta_data['name']
file_name = '{}__{}'.format(movie_id, simplify_string(movie_name)
) + '.json'
with open(folder_path + '/' + file_name, 'w') as f:
json.dump(meta_data, f)
print('saved movie < {} > to < {} >'.format(movie_name, file_name))
return True
except:
if try_cnt == 17:
print('Error loading movie -- skip this')
return False
print(
'maybe temporary internet connection problem. trying again < {} >'
.format(try_cnt + 1))
driver.refresh()
time.sleep(2)
return processOneMovie(movie_url, folder_path, driver, try_cnt + 1)
<mask token>
def loadFailCases():
try:
with open('fail_cases.json', 'r') as f:
fail_cases = json.load(f)
except:
        print(
            'Could not find fail_cases.json -- initializing with an empty list')
fail_cases = []
return fail_cases
<mask token>
|
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.common.action_chains import ActionChains
import time
import json
import re
import os
import datetime
###########################################################################
driver_path = "/home/arnab/Codes/00_Libs/chromedriver_linux64/chromedriver"
###########################################################################
def simplify_string(inp):
inp = inp.lower().strip()
inp = re.sub(r'[^A-Za-z0-9]', '_', inp)
return inp
def makeDirectory(path):
print("creating directory " + path)
try:
os.mkdir(path)
except FileExistsError:
pass
def initialize(url, browser=None):
if(browser == None):
print("creating browser for the first and last time")
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
browser = webdriver.Chrome(driver_path, chrome_options=chrome_options)
browser.implicitly_wait(3)
browser.get(url)
browser.implicitly_wait(3)
return browser
def performClick(driver, element):
driver.execute_script("arguments[0].click();", element)
def getSoupFromElement(element):
html = element.get_attribute('innerHTML')
soup = BeautifulSoup(html, 'html.parser')
return soup
def processPageAnchor(anchorElem):
url = anchorElem['href']
text = anchorElem.find(text=True).strip()
return url, text
def getCastInfo(page_soup):
cast_table = page_soup.find("table", {"class": "cast_list"})
# print(" >>>>>>>>>>>>>>>>>>>>>>>>> ")
# print(cast_table.prettify())
cast_elem_arr = cast_table.findAll("tr", {"class": "odd"}) + cast_table.findAll("tr", {"class": "even"})
# print(len(cast_elem_arr))
# print(cast_elem_arr[0].prettify())
cast_and_character = []
for cast_elem in cast_elem_arr:
td_arr = cast_elem.findAll("td")
if(len(td_arr) < 4):
continue
# print(td_arr[1].prettify())
actor_elem = td_arr[1]
actor_anchor = actor_elem.find("a")
actor_url, actor_name = processPageAnchor(actor_anchor)
actor_info = {
"@type" : "Person",
"url" : actor_url,
"name" : actor_name
}
# print(actor_info)
# print(td_arr[3].prettify())
character_elem = td_arr[3]
character_info = []
character_anchor_arr = character_elem.findAll('a')
for character_anchor in character_anchor_arr:
character_url, character_name = processPageAnchor(character_anchor)
character_info.append({
"url" : character_url,
"name" : character_name
})
# print(character_info)
cast_and_character.append({
"actor" : actor_info,
"character_and_episodes" : character_info
})
# print(cast_and_character)
# print(len(cast_and_character))
return cast_and_character
def checkvalidtext(txt):
if(txt.isspace()):
return False
arr = ["|", "See more", "\u00bb", ","]
if txt in arr:
return False
if txt.strip() in arr:
return False
return True
def filter(arr):
ret = []
attr = "#"
for val in arr:
if(checkvalidtext(val) == False):
continue
if(val[-1] == ":"):
attr = val[0:-1]
continue
ret.append(val.strip())
return attr, ret
def parseDetailInfo(page_soup):
detail_elem = page_soup.find("div", {
'class': 'article',
'id': "titleDetails"
})
divs = detail_elem.findAll("div")
details = {}
for div in divs:
vrr = div.findAll()
attr, value = filter(div.findAll(text=True))
if(attr == "Official Sites" or attr == "#" or attr == "Color"):
continue
# print(attr, " >>>>>> ", value)
details[attr] = value
return details
def processOneMovie(movie_url, folder_path, driver, try_cnt = 0):
# if(True):
try:
if(try_cnt == 0):
driver = initialize(movie_url, driver)
page_html = driver.page_source
page_soup = BeautifulSoup(page_html, 'html.parser')
# print(page_soup.prettify())
query_result = page_soup.find("script", {"type": "application/ld+json"})
# print(query_result.string)
meta_data = json.loads(query_result.string)
try:
meta_data["cast_and_character"] = getCastInfo(page_soup)
except:
meta_data["cast_and_character"] = "Error loading cast information -- checked {}".format(datetime.datetime.now())
meta_data['details'] = parseDetailInfo(page_soup)
movie_id = meta_data["url"].split('/')[-2]
movie_name = meta_data["name"]
file_name = "{}__{}".format(movie_id, simplify_string(movie_name)) + ".json"
# print(file_name)
# print(meta_data)
with open(folder_path + "/" + file_name, "w") as f:
json.dump(meta_data, f)
print("saved movie < {} > to < {} >".format(movie_name, file_name))
return True
except:
if(try_cnt == 17):
print("Error loading movie -- skip this")
return False
print("maybe temporary internet connection problem. trying again < {} >".format(try_cnt + 1))
driver.refresh()
time.sleep(2)
return processOneMovie(movie_url, folder_path, driver, try_cnt+1)
#############################################################################################################
url_root = "https://www.imdb.com/"
save_path = "MOVIES"
summary_path = "IMDB_SUMMARY/SUMMARY_DATA"
frm = 1
rng = 250
limit = 600000 # set it to -1 for all processing
#############################################################################################################
makeDirectory(save_path)
summary_files = sorted(os.listdir(summary_path))
driver = initialize(url_root)
def loadFailCases():
try:
with open("fail_cases.json", "r") as f:
fail_cases = json.load(f)
except:
print("Could not find fail_cases.json -- initializing with empty folder")
fail_cases = []
return fail_cases
print(summary_files)
# for summary in summary_files:
while(True):
summary = "{} - {}.json".format(frm, frm+rng-1)
if(summary not in summary_files):
print("Could not fild summary file < {} >".format(summary))
break
print("Now processing < {} >".format(summary))
folder_name = summary.split('.')[0]
folder_path = save_path + "/" + folder_name
makeDirectory(folder_path)
with open(summary_path + "/" + summary) as f:
movie_arr = json.load(f)
# print(type(movie_arr))
# print(movie_arr)
process_cnt = 0
st = 0
# if(frm == 65251):
# st = 173
for idx in range(st, len(movie_arr)):
movie = movie_arr[idx]
# print(movie["link"])
movie_url = url_root + movie["link"]
success = processOneMovie(movie_url, folder_path, driver)
if(success == False):
fail_cases = loadFailCases()
fail_cases.append(movie)
with open("fail_cases.json", "w") as f:
json.dump(fail_cases, f)
process_cnt += 1
print(">>>>>>>>>>>>>>>>>>>>>>>>>> processed {} of {} --- of :: {}".format(st + process_cnt, len(movie_arr), summary))
frm += rng
if limit == -1:
continue
elif (frm > limit):
break
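The while loop encodes a naming convention for the summary chunks; making it explicit helps when generating those files up front. A small sketch (chunk_names is a name introduced here) that mirrors the loop's frm/rng/limit logic:
import itertools
def chunk_names(frm=1, rng=250, limit=600000):
    """Yield summary-chunk file names: '1 - 250.json', '251 - 500.json', ..."""
    while limit == -1 or frm <= limit:
        yield "{} - {}.json".format(frm, frm + rng - 1)
        frm += rng
# The first three names the crawler above would look for:
print(list(itertools.islice(chunk_names(), 3)))
# ['1 - 250.json', '251 - 500.json', '501 - 750.json']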
|
[
7,
9,
10,
12,
16
] |
2,158 |
06848ec0e327fed1da00446cec6392c6f42130af
|
<mask token>
|
<mask token>
for i in range(x, y + 1):
if i > 1:
for j in range(2, i):
if i % j == 0:
break
else:
count += 1
print(count)
|
<mask token>
x, y = map(int, input().split())
count = 0
for i in range(x, y + 1):
if i > 1:
for j in range(2, i):
if i % j == 0:
break
else:
count += 1
print(count)
|
'''Given a range defined by two numbers L and R, count the prime numbers in that range (inclusive of L and R).
Input Size : L <= R <= 100000 (complexity O(n); read about the Sieve of Eratosthenes)
Sample Testcase :
INPUT
2 5
OUTPUT
3'''
x,y=map(int,input().split())
count=0
for i in range(x,y+1):
if i>1:
for j in range(2,i):
if(i%j==0):
break
else:
count+=1
print(count)
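The docstring asks for the Sieve of Eratosthenes, but the loop above is trial division, which is far slower near the 100000 bound. A sketch of the near-linear sieve the docstring has in mind (count_primes_in_range is a name introduced here):
def count_primes_in_range(L, R):
    """Sieve of Eratosthenes up to R, then count primes in [L, R]."""
    is_prime = [True] * (R + 1)
    is_prime[0:2] = [False, False]  # 0 and 1 are not prime
    p = 2
    while p * p <= R:
        if is_prime[p]:
            for multiple in range(p * p, R + 1, p):
                is_prime[multiple] = False
        p += 1
    return sum(is_prime[L:R + 1])
assert count_primes_in_range(2, 5) == 3  # sample test case: 2, 3, 5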
| null |
[
0,
1,
2,
3
] |
2,159 |
e9918f4fac2e13b36d9b20ffc28dc6508aad6f9b
|
<mask token>
|
class Solution:
<mask token>
|
class Solution:
def numSmallerByFrequency(self, queries: List[str], words: List[str]
) ->List[int]:
words_freq = {word: word.count(min(word)) for word in words}
queries_freq = {}
ans = []
for query in queries:
if query in queries_freq:
ans.append(queries_freq[query])
continue
query_freq = query.count(min(query))
num = sum([(1 if query_freq < words_freq[word] else 0) for word in
words])
ans.append(num)
queries_freq[query] = num
return ans
|
class Solution:
    # complexity: 2*n^2 + 4*n^2 -> 6*n^2
def numSmallerByFrequency(self, queries: List[str], words: List[str]) -> List[int]:
# complexity: n*2*l where l is the length of the word -> 2*n^2
words_freq = {
word: word.count(min(word)) for word in words
}
queries_freq = {}
ans = []
# complexity: q*4*n where q is the length of queries -> 4n^2
for query in queries:
if query in queries_freq:
ans.append(queries_freq[query])
continue
# complexity: 2*l where l is the length of the word -> 2*n
query_freq = query.count(min(query))
            # complexity: 2*n due to the iteration and the sum over the words
num = sum([1 if query_freq < words_freq[word]
else 0 for word in words])
ans.append(num)
queries_freq[query] = num
return ans
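The quadratic comparison step can be avoided by sorting the word frequencies once and binary-searching each query's frequency, for O((n + q) log n) overall. A sketch, not part of the submission above; the asserts use the problem's published samples:
from bisect import bisect_right
from typing import List
def num_smaller_by_frequency(queries: List[str], words: List[str]) -> List[int]:
    def f(s: str) -> int:  # frequency of the lexicographically smallest character
        return s.count(min(s))
    freqs = sorted(f(w) for w in words)  # O(n log n), done once
    n = len(freqs)
    # words with frequency strictly greater than f(q):
    return [n - bisect_right(freqs, f(q)) for q in queries]
assert num_smaller_by_frequency(['cbd'], ['zaaaz']) == [1]
assert num_smaller_by_frequency(['bbb', 'cc'], ['a', 'aa', 'aaa', 'aaaa']) == [1, 2]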
| null |
[
0,
1,
2,
3
] |
2,160 |
a718949ed95b7d78f091b1e0f237eed151b102ae
|
<mask token>
|
from .most_serializers import *
| null | null | null |
[
0,
1
] |
2,161 |
e1172cadeb8b2ce036d8431cef78cfe19bda0cb8
|
<mask token>
|
<mask token>
print('Temp in ', celsius, 'celsius=', fah, ' Fahrenheit')
|
celsius = input('Enter temperature in Celsius')
celsius = int(celsius)
fah = celsius * 9 / 5 + 32
print('Temp in ', celsius, 'celsius=', fah, ' Fahrenheit')
|
#Program to convert temp in degree Celsius to temp in degree Fahrenheit
celsius=input("Enter temperature in Celsius")
celsius=int(celsius)
fah=(celsius*9/5)+32
print("Temp in ",celsius,"celsius=",fah," Fahrenheit")
| null |
[
0,
1,
2,
3
] |
2,162 |
0ad529298f321d2f3a63cde8179a50cf2881ee00
|
<mask token>
def main():
global args
torch.manual_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'log_test.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
if not args.convert_to_onnx:
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(
)
num_train_pids = 100
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=num_train_pids,
loss={'xent', 'htri'}, pretrained=False if args.load_weights else
'imagenet', grayscale=args.grayscale, ceil_mode=not args.
convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=
args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=
args.convbn)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_weights(model, args.load_weights)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.absorb_bn:
search_absorbed_bn(model)
if args.quantization or args.save_quantized_model:
from gap_quantization.quantization import ModelQuantizer
from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files
if args.quant_data_dir is None:
raise AttributeError('quant-data-dir argument is required.')
num_channels = 1 if args.grayscale else 3
cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,
'save_folder': args.save_dir, 'data_source': args.
quant_data_dir, 'use_gpu': False, 'batch_size': 1,
'num_workers': 0, 'verbose': True, 'save_params': args.
save_quantized_model, 'quantize_forward': True,
'num_input_channels': num_channels, 'raw_input': args.
no_normalize, 'double_precision': args.double_precision}
model = model.cpu()
quantizer = ModelQuantizer(model, cfg, dm.transform_test)
quantizer.quantize_model()
if args.infer:
if args.image_path == '':
raise AttributeError('Image for inference is required')
quantizer.dump_activations(args.image_path, dm.transform_test,
save_dir=os.path.join(args.save_dir, 'activations_dump'))
dump_quant_params(args.save_dir, args.convbn)
if args.convbn:
remove_extra_dump(os.path.join(args.save_dir,
'activations_dump'))
remove_cat_files(args.save_dir)
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
if not 'lfw' in name.lower():
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(args, model, queryloader, galleryloader,
use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(distmat, dm.
return_testdataset_by_name(name), save_dir=osp.join
(args.save_dir, 'ranked_results', name), topk=20)
else:
model.eval()
same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,
dm.lfw_dataset, model, compute_embeddings_lfw, args.
test_batch_size, verbose=False, show_failed=args.
show_failed, load_embeddings=args.load_embeddings)
log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(
same_acc, diff_acc))
log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
log.info('Validation AUC: {0:.4f}'.format(auc))
log.info('Estimated threshold: {0:.4f}'.format(thresh))
return
<mask token>
|
<mask token>
def main():
global args
torch.manual_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'log_test.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
if not args.convert_to_onnx:
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(
)
num_train_pids = 100
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=num_train_pids,
loss={'xent', 'htri'}, pretrained=False if args.load_weights else
'imagenet', grayscale=args.grayscale, ceil_mode=not args.
convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=
args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=
args.convbn)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_weights(model, args.load_weights)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.absorb_bn:
search_absorbed_bn(model)
if args.quantization or args.save_quantized_model:
from gap_quantization.quantization import ModelQuantizer
from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files
if args.quant_data_dir is None:
raise AttributeError('quant-data-dir argument is required.')
num_channels = 1 if args.grayscale else 3
cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,
'save_folder': args.save_dir, 'data_source': args.
quant_data_dir, 'use_gpu': False, 'batch_size': 1,
'num_workers': 0, 'verbose': True, 'save_params': args.
save_quantized_model, 'quantize_forward': True,
'num_input_channels': num_channels, 'raw_input': args.
no_normalize, 'double_precision': args.double_precision}
model = model.cpu()
quantizer = ModelQuantizer(model, cfg, dm.transform_test)
quantizer.quantize_model()
if args.infer:
if args.image_path == '':
raise AttributeError('Image for inference is required')
quantizer.dump_activations(args.image_path, dm.transform_test,
save_dir=os.path.join(args.save_dir, 'activations_dump'))
dump_quant_params(args.save_dir, args.convbn)
if args.convbn:
remove_extra_dump(os.path.join(args.save_dir,
'activations_dump'))
remove_cat_files(args.save_dir)
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
if not 'lfw' in name.lower():
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(args, model, queryloader, galleryloader,
use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(distmat, dm.
return_testdataset_by_name(name), save_dir=osp.join
(args.save_dir, 'ranked_results', name), topk=20)
else:
model.eval()
same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,
dm.lfw_dataset, model, compute_embeddings_lfw, args.
test_batch_size, verbose=False, show_failed=args.
show_failed, load_embeddings=args.load_embeddings)
log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(
same_acc, diff_acc))
log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
log.info('Validation AUC: {0:.4f}'.format(auc))
log.info('Estimated threshold: {0:.4f}'.format(thresh))
return
if __name__ == '__main__':
main()
|
<mask token>
parser = argument_parser()
args = parser.parse_args()
def main():
global args
torch.manual_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'log_test.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
if not args.convert_to_onnx:
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(
)
num_train_pids = 100
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=num_train_pids,
loss={'xent', 'htri'}, pretrained=False if args.load_weights else
'imagenet', grayscale=args.grayscale, ceil_mode=not args.
convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=
args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=
args.convbn)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_weights(model, args.load_weights)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.absorb_bn:
search_absorbed_bn(model)
if args.quantization or args.save_quantized_model:
from gap_quantization.quantization import ModelQuantizer
from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files
if args.quant_data_dir is None:
raise AttributeError('quant-data-dir argument is required.')
num_channels = 1 if args.grayscale else 3
cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,
'save_folder': args.save_dir, 'data_source': args.
quant_data_dir, 'use_gpu': False, 'batch_size': 1,
'num_workers': 0, 'verbose': True, 'save_params': args.
save_quantized_model, 'quantize_forward': True,
'num_input_channels': num_channels, 'raw_input': args.
no_normalize, 'double_precision': args.double_precision}
model = model.cpu()
quantizer = ModelQuantizer(model, cfg, dm.transform_test)
quantizer.quantize_model()
if args.infer:
if args.image_path == '':
raise AttributeError('Image for inference is required')
quantizer.dump_activations(args.image_path, dm.transform_test,
save_dir=os.path.join(args.save_dir, 'activations_dump'))
dump_quant_params(args.save_dir, args.convbn)
if args.convbn:
remove_extra_dump(os.path.join(args.save_dir,
'activations_dump'))
remove_cat_files(args.save_dir)
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
if not 'lfw' in name.lower():
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(args, model, queryloader, galleryloader,
use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(distmat, dm.
return_testdataset_by_name(name), save_dir=osp.join
(args.save_dir, 'ranked_results', name), topk=20)
else:
model.eval()
same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,
dm.lfw_dataset, model, compute_embeddings_lfw, args.
test_batch_size, verbose=False, show_failed=args.
show_failed, load_embeddings=args.load_embeddings)
log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(
same_acc, diff_acc))
log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
log.info('Validation AUC: {0:.4f}'.format(auc))
log.info('Estimated threshold: {0:.4f}'.format(thresh))
return
if __name__ == '__main__':
main()
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
from collections import defaultdict
import numpy as np
import math
from functools import partial
from tqdm import tqdm
import glog as log
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger
from torchreid.utils.torchtools import count_num_param
from torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat
from torchreid.eval_metrics import test
from torchreid.utils.load_weights import load_weights
from torchreid.utils.absorb_bn import search_absorbed_bn
from torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw
parser = argument_parser()
args = parser.parse_args()
def main():
global args
torch.manual_seed(args.seed)
if not args.use_avai_gpus:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu:
use_gpu = False
log_name = 'log_test.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print('==========\nArgs:{}\n=========='.format(args))
if use_gpu:
print('Currently using GPU {}'.format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print('Currently using CPU, however, GPU is highly recommended')
print('Initializing image data manager')
if not args.convert_to_onnx:
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders(
)
num_train_pids = 100
print('Initializing model: {}'.format(args.arch))
model = models.init_model(name=args.arch, num_classes=num_train_pids,
loss={'xent', 'htri'}, pretrained=False if args.load_weights else
'imagenet', grayscale=args.grayscale, ceil_mode=not args.
convert_to_onnx, infer=True, bits=args.bits, normalize_embeddings=
args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=
args.convbn)
print('Model size: {:.3f} M'.format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
load_weights(model, args.load_weights)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.absorb_bn:
search_absorbed_bn(model)
if args.quantization or args.save_quantized_model:
from gap_quantization.quantization import ModelQuantizer
from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files
if args.quant_data_dir is None:
raise AttributeError('quant-data-dir argument is required.')
num_channels = 1 if args.grayscale else 3
cfg = {'bits': args.bits, 'accum_bits': 32, 'signed': True,
'save_folder': args.save_dir, 'data_source': args.
quant_data_dir, 'use_gpu': False, 'batch_size': 1,
'num_workers': 0, 'verbose': True, 'save_params': args.
save_quantized_model, 'quantize_forward': True,
'num_input_channels': num_channels, 'raw_input': args.
no_normalize, 'double_precision': args.double_precision}
model = model.cpu()
quantizer = ModelQuantizer(model, cfg, dm.transform_test)
quantizer.quantize_model()
if args.infer:
if args.image_path == '':
raise AttributeError('Image for inference is required')
quantizer.dump_activations(args.image_path, dm.transform_test,
save_dir=os.path.join(args.save_dir, 'activations_dump'))
dump_quant_params(args.save_dir, args.convbn)
if args.convbn:
remove_extra_dump(os.path.join(args.save_dir,
'activations_dump'))
remove_cat_files(args.save_dir)
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print('Evaluate only')
for name in args.target_names:
if not 'lfw' in name.lower():
print('Evaluating {} ...'.format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(args, model, queryloader, galleryloader,
use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(distmat, dm.
return_testdataset_by_name(name), save_dir=osp.join
(args.save_dir, 'ranked_results', name), topk=20)
else:
model.eval()
same_acc, diff_acc, all_acc, auc, thresh = evaluate(args,
dm.lfw_dataset, model, compute_embeddings_lfw, args.
test_batch_size, verbose=False, show_failed=args.
show_failed, load_embeddings=args.load_embeddings)
log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(
same_acc, diff_acc))
log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
log.info('Validation AUC: {0:.4f}'.format(auc))
log.info('Estimated threshold: {0:.4f}'.format(thresh))
return
if __name__ == '__main__':
main()
|
from __future__ import print_function
from __future__ import division
import os
import sys
import time
import datetime
import os.path as osp
from collections import defaultdict
import numpy as np
import math
from functools import partial
from tqdm import tqdm
import glog as log
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from args import argument_parser, image_dataset_kwargs, optimizer_kwargs
from torchreid.data_manager import ImageDataManager
from torchreid import models
from torchreid.utils.iotools import save_checkpoint, check_isfile
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.loggers import Logger
from torchreid.utils.torchtools import count_num_param
from torchreid.utils.reidtools import visualize_ranked_results, distmat_hist, calc_distmat
from torchreid.eval_metrics import test
from torchreid.utils.load_weights import load_weights
from torchreid.utils.absorb_bn import search_absorbed_bn
from torchreid.evaluate_lfw import evaluate, compute_embeddings_lfw
# global variables
parser = argument_parser()
args = parser.parse_args()
def main():
global args
torch.manual_seed(args.seed)
if not args.use_avai_gpus: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_devices
use_gpu = torch.cuda.is_available()
if args.use_cpu: use_gpu = False
log_name = 'log_test.txt'
sys.stdout = Logger(osp.join(args.save_dir, log_name))
print("==========\nArgs:{}\n==========".format(args))
if use_gpu:
print("Currently using GPU {}".format(args.gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(args.seed)
else:
print("Currently using CPU, however, GPU is highly recommended")
print("Initializing image data manager")
if not args.convert_to_onnx: # and not args.infer:
dm = ImageDataManager(use_gpu, **image_dataset_kwargs(args))
trainloader, trainloader_dict, testloader_dict = dm.return_dataloaders()
num_train_pids = 100
print("Initializing model: {}".format(args.arch))
model = models.init_model(name=args.arch, num_classes=num_train_pids, loss={'xent', 'htri'},
pretrained=False if args.load_weights else 'imagenet', grayscale=args.grayscale,
ceil_mode=not args.convert_to_onnx, infer=True, bits=args.bits,
normalize_embeddings=args.normalize_embeddings, normalize_fc=args.normalize_fc, convbn=args.convbn)
print("Model size: {:.3f} M".format(count_num_param(model)))
if args.load_weights and check_isfile(args.load_weights):
# load pretrained weights but ignore layers that don't match in size
load_weights(model, args.load_weights)
print("Loaded pretrained weights from '{}'".format(args.load_weights))
if args.absorb_bn:
search_absorbed_bn(model)
if args.quantization or args.save_quantized_model:
from gap_quantization.quantization import ModelQuantizer
from gap_quantization.dump_utils import dump_quant_params, remove_extra_dump, remove_cat_files
if args.quant_data_dir is None:
raise AttributeError('quant-data-dir argument is required.')
num_channels = 1 if args.grayscale else 3
cfg = {
"bits": args.bits, # number of bits to store weights and activations
"accum_bits": 32, # number of bits to store intermediate convolution result
"signed": True, # use signed numbers
"save_folder": args.save_dir, # folder to save results
"data_source": args.quant_data_dir, # folder with images to collect dataset statistics
"use_gpu": False, # use GPU for inference
"batch_size": 1,
"num_workers": 0, # number of workers for PyTorch dataloader
"verbose": True,
"save_params": args.save_quantized_model, # save quantization parameters to the file
"quantize_forward": True, # replace usual convs, poolings, ... with GAP-like ones
"num_input_channels": num_channels,
"raw_input": args.no_normalize,
"double_precision": args.double_precision # use double precision convolutions
}
model = model.cpu()
quantizer = ModelQuantizer(model, cfg, dm.transform_test) # transform test is OK if we use args.no_normalize
quantizer.quantize_model() # otherwise we need to add QuantizeInput operation
if args.infer:
if args.image_path == '':
raise AttributeError('Image for inference is required')
quantizer.dump_activations(args.image_path, dm.transform_test,
save_dir=os.path.join(args.save_dir, 'activations_dump'))
dump_quant_params(args.save_dir, args.convbn)
if args.convbn:
remove_extra_dump(os.path.join(args.save_dir, 'activations_dump'))
remove_cat_files(args.save_dir)
if use_gpu:
model = nn.DataParallel(model).cuda()
if args.evaluate:
print("Evaluate only")
for name in args.target_names:
if not 'lfw' in name.lower():
print("Evaluating {} ...".format(name))
queryloader = testloader_dict[name]['query']
galleryloader = testloader_dict[name]['gallery']
distmat = test(args, model, queryloader, galleryloader, use_gpu, return_distmat=True)
if args.visualize_ranks:
visualize_ranked_results(
distmat, dm.return_testdataset_by_name(name),
save_dir=osp.join(args.save_dir, 'ranked_results', name),
topk=20
)
else:
model.eval()
same_acc, diff_acc, all_acc, auc, thresh = evaluate(args, dm.lfw_dataset, model, compute_embeddings_lfw,
args.test_batch_size, verbose=False, show_failed=args.show_failed, load_embeddings=args.load_embeddings)
log.info('Validation accuracy: {0:.4f}, {1:.4f}'.format(same_acc, diff_acc))
log.info('Validation accuracy mean: {0:.4f}'.format(all_acc))
log.info('Validation AUC: {0:.4f}'.format(auc))
log.info('Estimated threshold: {0:.4f}'.format(thresh))
#roc_auc(model, '/home/maxim/data/lfw/pairsTest.txt', '/media/slow_drive/cropped_lfw', args, use_gpu)
return
if __name__ == '__main__':
main()
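
The cfg dictionary above gathers the quantization parameters: a bit width for weights and activations, a wider 32-bit accumulator for intermediate convolution results, and signed arithmetic. As a minimal sketch of the underlying idea — symmetric uniform quantization, independent of the gap_quantization library (quantize_tensor below is a hypothetical helper, not part of the script):

import numpy as np

def quantize_tensor(x, bits=8, signed=True):
    # symmetric uniform quantization to `bits` bits; one scale per tensor
    qmax = 2 ** (bits - 1) - 1 if signed else 2 ** bits - 1
    scale = np.max(np.abs(x)) / qmax
    q = np.clip(np.round(x / scale), (-qmax - 1) if signed else 0, qmax)
    return q.astype(np.int32), scale  # integers for storage, scale to dequantize

weights = np.random.randn(4, 4).astype(np.float32)
q, scale = quantize_tensor(weights, bits=8)
dequantized = q * scale  # approximate reconstruction of `weights`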
|
[
1,
2,
3,
4,
5
] |
2,163 |
6cba431650ee8b74baa8310c144321b2e587155e
|
<mask token>
|
<mask token>
for color in list_1:
for size in list_2:
print(color, size)
<mask token>
list_3.reverse()
print(list_3)
|
list_1 = ['color', 'white', 'black']
list_2 = ['short', 'medium', 'large', 'xl']
for color in list_1:
for size in list_2:
print(color, size)
list_3 = [(color, size) for color in list_1 for size in list_2]
list_3.reverse()
print(list_3)
|
list_1 = ['color', 'white', 'black']  # colors of the available t-shirts
list_2 = ['short', 'medium', 'large', 'xl']  # sizes of the available t-shirts
for color in list_1:
    for size in list_2:
        # cartesian product: print every (color, size) combination we stock
        print(color, size)
# list comprehension building the same combinations, then printed in reverse order
list_3 = [(color, size) for color in list_1 for size in list_2]
list_3.reverse()  # reverse the list in place
print(list_3)
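
The nested loops above build the cartesian product by hand; the standard library already provides it as itertools.product, which yields the same (color, size) pairs in the same order:

from itertools import product

list_1 = ['color', 'white', 'black']
list_2 = ['short', 'medium', 'large', 'xl']
list_3 = list(product(list_1, list_2))  # same pairs as the comprehension above
list_3.reverse()
print(list_3)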
| null |
[
0,
1,
2,
3
] |
2,164 |
810017cd5814fc20ebcdbdf26a32ea1bcfc88625
|
<mask token>
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
<mask token>
|
<mask token>
def test_linear_slope_1():
eta = ETA(100)
eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])
getattr(eta, '_calculate')()
assert 100 == eta.eta_epoch
assert 1.0 == eta.rate
assert 1.0 == eta.rate_unstable
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
<mask token>
|
<mask token>
def test_linear_slope_1():
eta = ETA(100)
eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])
getattr(eta, '_calculate')()
assert 100 == eta.eta_epoch
assert 1.0 == eta.rate
assert 1.0 == eta.rate_unstable
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
def test_linear_transform_undefined():
eta = ETA()
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert eta.eta_epoch is None
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
|
from collections import deque
from etaprogress.eta import ETA
def test_linear_slope_1():
eta = ETA(100)
eta._timing_data = deque([(10, 10), (20, 20), (30, 30), (40, 40)])
getattr(eta, '_calculate')()
assert 100 == eta.eta_epoch
assert 1.0 == eta.rate
assert 1.0 == eta.rate_unstable
def test_linear_slope_2():
eta = ETA(100)
eta._timing_data = deque([(10, 20), (20, 40), (30, 60), (40, 80)])
getattr(eta, '_calculate')()
assert 50 == eta.eta_epoch
assert 2.0 == eta.rate
assert 2.0 == eta.rate_unstable
def test_linear_transform():
"""Wolfram Alpha:
x is the timestamp. y is the numerator. 120 is the denominator.
linear fit {1.2, 22},{2.4, 58},{3.1, 102},{4.4, 118}
The closer we get to 100%, the more vertical shift/transform is applied to the line.
As we near the end we want the line to get closer to the last point on the graph.
This avoids having 99% with an ETA in the past.
"""
eta = ETA(120)
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert 4.4 < eta.eta_epoch < 4.6
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
def test_linear_transform_undefined():
eta = ETA()
eta._timing_data = deque([(1.2, 22), (2.4, 58), (3.1, 102), (4.4, 118)])
getattr(eta, '_calculate')()
assert eta.eta_epoch is None
assert 30 < eta.rate < 35
assert 12 < eta.rate_unstable < 13
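
The slope tests above behave like an ordinary least-squares fit over the (timestamp, numerator) pairs. A minimal sketch of that arithmetic, assuming a plain linear fit with no end-of-run transform (the internals of ETA._calculate are not shown here):

import numpy as np

timing = [(10, 20), (20, 40), (30, 60), (40, 80)]  # data from test_linear_slope_2
x, y = zip(*timing)
slope, intercept = np.polyfit(x, y, 1)         # least-squares line y = slope*x + b
denominator = 100
eta_epoch = (denominator - intercept) / slope  # timestamp where y reaches 100
print(slope, eta_epoch)                        # ~2.0 and ~50.0, as the test asserts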
| null |
[
2,
3,
4,
5
] |
2,165 |
013189cd67cc44efd539c75ed235a0753d95f54e
|
<mask token>
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
<mask token>
|
<mask token>
np.set_printoptions(suppress=True)
<mask token>
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
<mask token>
print(data_x.shape)
print(data_y.shape)
<mask token>
clf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),
batch_size=128, epochs=10)
<mask token>
print(predictions.shape)
print(clf.evaluate(data_x_val, data_y_val))
|
<mask token>
np.set_printoptions(suppress=True)
EPOCHS = 10
BATCH_SIZE = 128
SHIFT_DAYS = 3
PRED_STEPS = 24 * 6
TIME_STEPS = SHIFT_DAYS * PRED_STEPS
DIMENSION = 15
MODEL_NUM = 10
CAPACITY = 89.7
TRAIN_RATIO = 0.6
VAL_RATIO = 0.2
START_DATE = '2021012899'
END_DATE = '2021042924'
SAVE_PATH = './data/'
SAVE_NAME = 'autoML_Test'
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
pow_scaler, df = getData()
dataset = df
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',
'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_y = data_train['pow'].astype('float64')
data_y_val = validation_data['pow'].astype('float64')
print(data_x.shape)
print(data_y.shape)
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,
objective='val_loss')
clf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),
batch_size=128, epochs=10)
predictions = clf.predict(data_x_test)
print(predictions.shape)
print(clf.evaluate(data_x_val, data_y_val))
|
import pandas as pd
import tensorflow as tf
import autokeras as ak
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from numpy import concatenate
from pandas import read_csv, DataFrame, concat
from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True)
EPOCHS = 10
BATCH_SIZE = 128
SHIFT_DAYS = 3
PRED_STEPS = 24 * 6
TIME_STEPS = SHIFT_DAYS * PRED_STEPS
DIMENSION = 15
MODEL_NUM = 10
CAPACITY = 89.7
TRAIN_RATIO = 0.6
VAL_RATIO = 0.2
START_DATE = '2021012899'
END_DATE = '2021042924'
SAVE_PATH = './data/'
SAVE_NAME = 'autoML_Test'
def getData():
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file + '.csv', encoding='CP949', converters={
'date': int})
print(power_df.shape)
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file + '.csv', encoding='CP949', converters
={'date': int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns,
index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range=(0, 1))
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.
columns, index=list(weather_df.index.values))
df = weather_scaleddf.copy()
df.insert(0, 'pow', power_scaleddf.values, True)
return pow_scaler, df
pow_scaler, df = getData()
dataset = df
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_val = validation_data[['pow', 'temp', 'humidity', 'windspeed',
'windgust', 'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_x_test = dataset[['pow', 'temp', 'humidity', 'windspeed', 'windgust',
'maxdailygust', 'winddir', 'hourlyrainin', 'dailyrainin',
'weeklyrainin', 'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv',
'feelslike', 'dewpoint', 'outside_status']].astype('float64')
data_y = data_train['pow'].astype('float64')
data_y_val = validation_data['pow'].astype('float64')
print(data_x.shape)
print(data_y.shape)
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(lookback=lookback, predict_from=predict_from,
objective='val_loss')
clf.fit(x=data_x, y=data_y, validation_data=(data_x_val, data_y_val),
batch_size=128, epochs=10)
predictions = clf.predict(data_x_test)
print(predictions.shape)
print(clf.evaluate(data_x_val, data_y_val))
|
import pandas as pd
import tensorflow as tf
import autokeras as ak
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tensorflow as tf
from numpy import concatenate
from pandas import read_csv, DataFrame, concat
from sklearn.preprocessing import MinMaxScaler
np.set_printoptions(suppress=True)
EPOCHS = 10
BATCH_SIZE = 128
SHIFT_DAYS = 3
PRED_STEPS = 24*6 # prediction steps at 10-minute intervals (24 h * 6 per hour)
TIME_STEPS = SHIFT_DAYS*PRED_STEPS # total look-back steps (SHIFT_DAYS days of steps)
DIMENSION = 15
MODEL_NUM = 10
CAPACITY = 89.7
TRAIN_RATIO = 0.6
VAL_RATIO = 0.2
START_DATE = '2021012899'
END_DATE = '2021042924'
SAVE_PATH = './data/'
SAVE_NAME = 'autoML_Test'
def getData():
# power
power_file = './data/power_20210129_20210429_preprocess_1hour'
power_df = read_csv(power_file+'.csv', encoding='CP949', converters={'date':int})
print(power_df.shape)
# sensor
sensor_file = 'data/sensor_20210129_20210429_preprocess_1hour'
sensor_df = read_csv(sensor_file+'.csv', encoding='CP949', converters={'date':int})
sensor_df = sensor_df.sort_values('date')
print(sensor_df.shape)
# scale
power_df.drop(['date'], axis=1, inplace=True)
pow_scaler = MinMaxScaler(feature_range = (0, 1))
scaled_pow = pow_scaler.fit_transform(power_df.values)
power_scaleddf = pd.DataFrame(scaled_pow, columns=power_df.columns, index=list(power_df.index.values))
weather_df = sensor_df.copy()
weather_df.drop(['date'], axis=1, inplace=True)
weather_scaler = MinMaxScaler(feature_range = (0, 1))#scale
scaled_weather = weather_scaler.fit_transform(weather_df.values)
weather_scaleddf = pd.DataFrame(scaled_weather, columns=weather_df.columns, index=list(weather_df.index.values))
# JOIN
df = weather_scaleddf.copy()
# pow + weather + powY
df.insert(0, 'pow', power_scaleddf.values, True)
#df = df.iloc[0:-TIME_STEPS, :]
#df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)
#df.insert(df.shape[1], 'pow_Y', power_scaleddf.iloc[TIME_STEPS:, :].values, True)
#df.to_csv(SAVE_PATH+"total_scaled"+SAVE_NAME+".csv",mode='w',index=False, encoding='CP949')
#display(df)
return pow_scaler, df
pow_scaler, df = getData()
#display(df)
dataset = df
val_split = int(len(dataset) * 0.7)
data_train = dataset[:val_split]
validation_data = dataset[val_split:]
data_x = data_train[
[
'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
'dewpoint', 'outside_status'
]
].astype("float64")
data_x_val = validation_data[
[
'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
'dewpoint', 'outside_status'
]
].astype("float64")
# Data with train data and the unseen data from subsequent time steps.
data_x_test = dataset[
[
'pow', 'temp', 'humidity', 'windspeed', 'windgust', 'maxdailygust',
'winddir', 'hourlyrainin', 'dailyrainin', 'weeklyrainin',
'monthlyrainin', 'yearlyrainin', 'solarradiation', 'uv', 'feelslike',
'dewpoint', 'outside_status'
]
].astype("float64")
data_y = data_train["pow"].astype("float64")
data_y_val = validation_data["pow"].astype("float64")
print(data_x.shape) # (6549, 12)
print(data_y.shape) # (6549,)
predict_from = 1
predict_until = 10
lookback = 3
clf = ak.TimeseriesForecaster(
lookback=lookback,
predict_from=predict_from,
#predict_until=predict_until,
#max_trials=1,
objective="val_loss",
)
# Train the TimeSeriesForecaster with train data
clf.fit(
x=data_x,
y=data_y,
validation_data=(data_x_val, data_y_val),
batch_size=128,
epochs=10,
)
# Predict with the best model(includes original training data).
predictions = clf.predict(data_x_test)
print(predictions.shape)
# Evaluate the best model with testing data.
print(clf.evaluate(data_x_val, data_y_val))
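
Because the targets were scaled to [0, 1] with pow_scaler, the forecaster's outputs live in that scaled space; mapping them back to the original power units uses the fitted scaler. A short sketch reusing the variables above:

# inverse_transform expects a 2-D array, hence the reshape;
# the result is in the original units of the `pow` column
predictions_orig = pow_scaler.inverse_transform(predictions.reshape(-1, 1))
print(predictions_orig[:5])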
|
[
1,
2,
3,
4,
5
] |
2,166 |
957e18b2536cda69ba1db571d0308d5e392fe488
|
<mask token>
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
<mask token>
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
<mask token>
|
<mask token>
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
<mask token>
|
<mask token>
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
<mask token>
if cfg.MULTIPLE_ARCHITECTURES:
best_architecture = []
best_regularizer = ''
best_activation_function = ''
best_precision = 0
best_recall = 0
best_f1 = 0
count_max = 0
counter = 0
architecture_list = []
for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):
prod = list(product(cfg.TEST_NOTES, repeat=i))
architecture_list.extend(prod)
count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg
.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)
with open('output/wrapper_test_mean.csv', 'a') as f:
f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n')
for architecture in architecture_list:
for regularizer in cfg.TEST_REGULARIZERS:
for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:
for class_weight in cfg.TEST_CLASS_WEIGHTS:
reset_keras()
print(str(counter) + '/' + str(count_max))
model = BuildModel(cfg, input_shape, True, list(
architecture), regularizer, activation_function)
model_trained = TrainModel(cfg, model,
training_features, training_targets, {(0): 1.0,
(1): class_weight})
precision, recall, f1 = EvaluateModelTest(cfg,
model_trained, test_features, test_targets)
if recall > best_recall:
best_precision = precision
best_recall = recall
best_f1 = f1
best_architecture = list(architecture)
best_regularizer = regularizer
best_activation_function = activation_function
la1 = list(architecture)[0]
la2 = 0
la3 = 0
la4 = 0
la5 = 0
if len(list(architecture)) >= 2:
la2 = list(architecture)[1]
if len(list(architecture)) >= 3:
la3 = list(architecture)[2]
if len(list(architecture)) >= 4:
la4 = list(architecture)[3]
if len(list(architecture)) >= 5:
la5 = list(architecture)[4]
f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +
',' + str(la4) + ',' + str(la5) + ',' + str(
class_weight) + ',' + regularizer + ',' +
activation_function + ',' + str(precision) +
',' + str(recall) + ',' + str(f1) + '\n')
counter += 1
print('BEST ARCHITECTURE:')
print(best_architecture)
print(best_regularizer)
print(best_activation_function)
print('precision: ' + str(best_precision) + ', recall: ' + str(
best_recall) + ', f1: ' + str(best_f1))
else:
reset_keras()
model = BuildModel(cfg, input_shape, False, 0, 0, 0)
model = TrainModel(cfg, model, training_features, training_targets, cfg
.CLASS_WEIGHT)
EvaluateModel(cfg, model, test_features, test_targets)
|
<mask token>
cfg = Config()
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(
training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO
)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer,
activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=
cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0,
patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE,
verbose=1, validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS,
class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(
test_targets, predictions, average='macro')
f1 = 2 * (precision * recall / (precision + recall))
return precision, recall, f1
training_X, training_y, test_X, test_Y = FetchData(cfg)
training_features = np.array(training_X)
training_targets = np.array(training_y)
test_features = np.array(test_X)
test_targets = np.array(test_Y)
input_shape = len(training_features[0]),
if cfg.MULTIPLE_ARCHITECTURES:
best_architecture = []
best_regularizer = ''
best_activation_function = ''
best_precision = 0
best_recall = 0
best_f1 = 0
count_max = 0
counter = 0
architecture_list = []
for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):
prod = list(product(cfg.TEST_NOTES, repeat=i))
architecture_list.extend(prod)
count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg
.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)
with open('output/wrapper_test_mean.csv', 'a') as f:
f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n')
for architecture in architecture_list:
for regularizer in cfg.TEST_REGULARIZERS:
for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:
for class_weight in cfg.TEST_CLASS_WEIGHTS:
reset_keras()
print(str(counter) + '/' + str(count_max))
model = BuildModel(cfg, input_shape, True, list(
architecture), regularizer, activation_function)
model_trained = TrainModel(cfg, model,
training_features, training_targets, {(0): 1.0,
(1): class_weight})
precision, recall, f1 = EvaluateModelTest(cfg,
model_trained, test_features, test_targets)
if recall > best_recall:
best_precision = precision
best_recall = recall
best_f1 = f1
best_architecture = list(architecture)
best_regularizer = regularizer
best_activation_function = activation_function
la1 = list(architecture)[0]
la2 = 0
la3 = 0
la4 = 0
la5 = 0
if len(list(architecture)) >= 2:
la2 = list(architecture)[1]
if len(list(architecture)) >= 3:
la3 = list(architecture)[2]
if len(list(architecture)) >= 4:
la4 = list(architecture)[3]
if len(list(architecture)) >= 5:
la5 = list(architecture)[4]
f.write(str(la1) + ',' + str(la2) + ',' + str(la3) +
',' + str(la4) + ',' + str(la5) + ',' + str(
class_weight) + ',' + regularizer + ',' +
activation_function + ',' + str(precision) +
',' + str(recall) + ',' + str(f1) + '\n')
counter += 1
print('BEST ARCHITECTURE:')
print(best_architecture)
print(best_regularizer)
print(best_activation_function)
print('precision: ' + str(best_precision) + ', recall: ' + str(
best_recall) + ', f1: ' + str(best_f1))
else:
reset_keras()
model = BuildModel(cfg, input_shape, False, 0, 0, 0)
model = TrainModel(cfg, model, training_features, training_targets, cfg
.CLASS_WEIGHT)
EvaluateModel(cfg, model, test_features, test_targets)
|
from config import Config
import numpy as np
from itertools import product
from sklearn.utils import shuffle
from sklearn.metrics import precision_recall_fscore_support
from keras import callbacks, regularizers
from keras.models import Sequential
from keras.layers import Dense, InputLayer
from keras import backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
from src.classification_data_tools import limit_negative_samples
import pickle
from tensorflow import set_random_seed
import tensorflow as tf
cfg = Config()
def FetchData(cfg):
with open(cfg.FILE, 'rb') as f:
data = pickle.load(f)
if cfg.SHUFFLE:
features, targets = shuffle(data[0], data[1])
else:
features = data[0]
targets = data[1]
training_features = features[:int(len(data[0]) * cfg.TRAINING_CUT) - 1]
training_targets = targets[:int(len(data[1]) * cfg.TRAINING_CUT) - 1]
test_features = features[int(len(data[0]) * cfg.TRAINING_CUT):]
test_targets = targets[int(len(data[1]) * cfg.TRAINING_CUT):]
if cfg.NEGATIVE_SAMPLES_RATIO != 0:
training_features, training_targets = limit_negative_samples(training_features, training_targets, cfg.NEGATIVE_SAMPLES_RATIO)
return training_features, training_targets, test_features, test_targets
def BuildModel(cfg, input_shape, iftest, hidden_layers, regularizer, activation_function):
if regularizer == 'l1':
regularizer = regularizers.l1(0.05)
elif regularizer == 'l2':
regularizer = regularizers.l2(0.05)
elif regularizer == 'none':
regularizer = None
model = Sequential()
model.add(InputLayer(input_shape))
if iftest:
for layer in hidden_layers:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=regularizer, activation=activation_function))
else:
for layer in cfg.HIDDEN_LAYERS:
model.add(Dense(layer, use_bias=cfg.BIAS, kernel_regularizer=cfg.REGULARIZER, activation=cfg.ACTIVATION_FUNCTION))
model.add(Dense(1, use_bias=cfg.BIAS, activation='sigmoid'))
model.compile(loss=cfg.LOSS, optimizer=cfg.OPTIMIZER, metrics=['accuracy'])
return model
def TrainModel(cfg, model, training_features, training_targets, cw):
if cfg.EARLY_STOPPING:
es = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=cfg.EARLY_STOPPING_PATIENCE, verbose=0, mode='min')
model.fit(training_features, training_targets, epochs=cfg.EPOCHS, callbacks=[es], class_weight=cw, batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
else:
model.fit(training_features, training_targets, epochs=cfg.EPOCHS, class_weight=cw,
batch_size=cfg.BATCH_SIZE, verbose=1,
validation_split=1 - cfg.TRAINING_CUT)
return model
def EvaluateModel(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')
f1 = 2 * ((precision * recall) / (precision + recall))
print(str(precision) + ', ' + str(recall) + ', ' + str(f1))
def reset_keras():
sess = K.get_session()
K.clear_session()
sess.close()
sess = K.get_session()
np.random.seed(1)
tf.set_random_seed(2)
def EvaluateModelTest(cfg, model, test_features, test_targets):
predictions = model.predict(test_features)
for prediction in predictions:
if prediction[0] < 0.5:
prediction[0] = 0
else:
prediction[0] = 1
precision, recall, fscore, support = precision_recall_fscore_support(test_targets, predictions, average='macro')
f1 = 2 * ((precision * recall) / (precision + recall))
return precision, recall, f1
#estimator = KerasClassifier(build_fn=model, epochs=4, batch_size=32, verbose=1)
#kfold = StratifiedKFold(n_splits=10, shuffle=True)
#results = cross_val_score(estimator, test_features, test_targets, cv=kfold)
#print("Results: %.2f%% (%.2f%%)" % (results.mean() * 100, results.std() * 100))
training_X, training_y, test_X, test_Y = FetchData(cfg)
training_features = np.array(training_X)
training_targets = np.array(training_y)
test_features = np.array(test_X)
test_targets = np.array(test_Y)
input_shape = (len(training_features[0]),)
if cfg.MULTIPLE_ARCHITECTURES:
best_architecture = []
best_regularizer = ''
best_activation_function = ''
best_precision = 0
best_recall = 0
best_f1 = 0
count_max = 0
counter = 0
architecture_list = []
for i in range(cfg.TEST_LAYERS_MIN, cfg.TEST_LAYERS_MAX + 1):
prod = list(product(cfg.TEST_NOTES, repeat = i))
architecture_list.extend(prod)
count_max = len(architecture_list) * len(cfg.TEST_REGULARIZERS) * len(cfg.TEST_ACTIVATION_FUNCTIONS) * len(cfg.TEST_CLASS_WEIGHTS)
with open('output/wrapper_test_mean.csv', 'a') as f:
f.write('1,2,3,4,5,cw,regularizer,activation,precision,recall,f1\n')
for architecture in architecture_list:
for regularizer in cfg.TEST_REGULARIZERS:
for activation_function in cfg.TEST_ACTIVATION_FUNCTIONS:
for class_weight in cfg.TEST_CLASS_WEIGHTS:
reset_keras()
print(str(counter) + '/' + str(count_max))
model = BuildModel(cfg, input_shape, True, list(architecture), regularizer, activation_function)
model_trained = TrainModel(cfg, model, training_features, training_targets, {0: 1., 1: class_weight})
precision, recall, f1 = EvaluateModelTest(cfg, model_trained, test_features, test_targets)
if recall > best_recall:
best_precision = precision
best_recall = recall
best_f1 = f1
best_architecture = list(architecture)
best_regularizer = regularizer
best_activation_function = activation_function
la1 = list(architecture)[0]
la2 = 0
la3 = 0
la4 = 0
la5 = 0
if len(list(architecture)) >= 2:
la2 = list(architecture)[1]
if len(list(architecture)) >= 3:
la3 = list(architecture)[2]
if len(list(architecture)) >= 4:
la4 = list(architecture)[3]
if len(list(architecture)) >= 5:
la5 = list(architecture)[4]
                        f.write(str(la1) + ',' + str(la2) + ',' + str(la3) + ',' +
                                str(la4) + ',' + str(la5) + ',' + str(class_weight) + ',' +
                                regularizer + ',' + activation_function + ',' +
                                str(precision) + ',' + str(recall) + ',' + str(f1) + '\n')
counter += 1
print('BEST ARCHITECTURE:')
print(best_architecture)
print(best_regularizer)
print(best_activation_function)
print('precision: ' + str(best_precision) + ', recall: ' + str(best_recall) + ', f1: ' + str(best_f1))
else:
reset_keras()
model = BuildModel(cfg, input_shape, False, 0, 0, 0)
model = TrainModel(cfg, model, training_features, training_targets, cfg.CLASS_WEIGHT)
EvaluateModel(cfg, model, test_features, test_targets)
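
The architecture search above enumerates product(cfg.TEST_NOTES, repeat=i) for every depth between TEST_LAYERS_MIN and TEST_LAYERS_MAX, so the candidate count grows as len(TEST_NOTES)**i per depth. A tiny self-contained illustration (the widths here are made up):

from itertools import product

TEST_NOTES = [32, 64]               # hypothetical candidate layer widths
architecture_list = []
for i in range(1, 3):               # depths 1 and 2
    architecture_list.extend(product(TEST_NOTES, repeat=i))
print(architecture_list)
# [(32,), (64,), (32, 32), (32, 64), (64, 32), (64, 64)] -> 2 + 4 candidates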
|
[
5,
6,
7,
8,
10
] |
2,167 |
9a9fdf0f3cfb876a384059f3dcf2508f960168c2
|
# hi :)
import numpy as np
import random
from copy import deepcopy
# initialization....
# see also prepare.sh
header = np.loadtxt("header.txt", dtype=int)
TIME = header[2]
CARS = header[3]
STARTPOINT = header[4]
GRAPH = np.loadtxt("links.txt",dtype=int)
number_of_links = GRAPH.shape[0]
N = len(GRAPH[:,1])
VOIS=[]
TPS=[]
DIST=[]
AWARD=[]
for i in range(N):
VOIS.append([])
TPS.append([])
DIST.append([])
for i in range(N):
VOIS[GRAPH[i,0]].append(GRAPH[i,1])
TPS[GRAPH[i,0]].append(GRAPH[i,3])
DIST[GRAPH[i,0]].append(GRAPH[i,4])
if GRAPH[i,2] == 2:
VOIS[GRAPH[i,1]].append(GRAPH[i,0])
TPS[GRAPH[i,1]].append(GRAPH[i,3])
DIST[GRAPH[i,1]].append(GRAPH[i,4])
# VOIS[2803] = [1231, 123,123]
# TPS[2803] = [10s, 20s, 30s]
# DIST[2803] = [10m, 200m, 300m]
# the main code
def best_neighbour(current_node, current_cost):
    neighbours = VOIS[current_node]
    # keep only the neighbours we can still reach within the time budget
    good_neighbours_indexes = []
    for n in range(len(neighbours)):
        if current_cost + TPS[current_node][n] <= TIME:
            good_neighbours_indexes.append(n)
    if len(good_neighbours_indexes) > 0:
        # greedy choice: pick at random among the feasible neighbours
        # whose street still carries the highest remaining award
        awards = [DIST[current_node][ind]
                  for ind in good_neighbours_indexes]
        maward = max(awards)
        indexes = [ind for ind in good_neighbours_indexes
                   if DIST[current_node][ind] == maward]
        best_neighbour_index = random.choice(indexes)
        cost = TPS[current_node][best_neighbour_index]
        best_neighbour = neighbours[best_neighbour_index]
    else:
        # no neighbour fits in the remaining time: signal failure
        cost = -100
        best_neighbour = -100
    return (best_neighbour, cost)
def remove_award(current_node, next_node):
next_node_index = VOIS[current_node].index(next_node)
# the distance will be zero
DIST[current_node][next_node_index] = 0
if current_node in VOIS[next_node]:
current_node_index = VOIS[next_node].index(current_node)
DIST[next_node][current_node_index] = 0
print(CARS)
# CAR by CAR
for CAR in range(CARS):
visited_nodes = []
current_node = STARTPOINT
current_time = 0
visited_nodes.append(current_node)
while current_time < TIME:
# choose a neighbour
next_node, time = best_neighbour(current_node, current_time)
if next_node == -100:
break
else:
            # we've been here, so remove the award
remove_award(current_node, next_node)
visited_nodes.append(next_node)
current_node = next_node
current_time = current_time + time
    # output for that CAR
    print(len(visited_nodes))
    for n in visited_nodes:
        print(n)
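
The VOIS/TPS/DIST lists above form a plain adjacency-list graph: for each node, parallel lists of its neighbours, the traversal time of each edge, and the award (street length) still collectable on it. A toy example of the structure and one greedy step (numbers invented, not from the real input files):

# toy graph: node 0 connects to nodes 1 and 2
VOIS = [[1, 2], [], []]      # neighbours per node
TPS = [[10, 20], [], []]     # traversal time per edge, in seconds
DIST = [[100, 300], [], []]  # remaining award per edge
TIME = 25                    # time budget

# from node 0 at t=0, the edge to node 2 has the larger award (300)
# and still fits the budget (20 <= 25), so it is the greedy choice
feasible = [n for n in range(len(VOIS[0])) if TPS[0][n] <= TIME]
best = max(feasible, key=lambda n: DIST[0][n])
print(VOIS[0][best], TPS[0][best])  # -> 2 20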
| null | null | null | null |
[
0
] |
2,168 |
bd179fda18551d4f3d8a4d695a9da38ee607ef1d
|
<mask token>
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
<mask token>
<mask token>
<mask token>
<mask token>
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
ids_should_be_updated = []
for i in range(4):
ids_should_be_updated.append(self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set.id,
biobankId='100153482', sampleId='21042005280', genomeType=
'test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if
i & 2 == 0 else 'N').id)
for i in range(2):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_array', genomicWorkflowState=
GenomicWorkflowState.AW0, ai_an='N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.
blockResearch == 1, created_members))
self.assertTrue(ids_should_be_updated.sort() == [obj.id for obj in
blocklisted].sort())
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in created_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 0 and obj.
blockResearchReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
for i in range(4):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='test_investigation_one' if i & 2 != 0 else
'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in modified_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
<mask token>
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_reconcile_pdr_data(self, mock_cloud_task):
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.
model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in
first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1))
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=first_run[
0].id, startTime=clock.CLOCK.now(), filePath=
f'test_file_path_{i}', bucketName='test_bucket',
fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id)
manifest = (self.data_generator.
create_database_genomic_manifest_file(manifestTypeId=2,
filePath=f'test_file_path_{i}'))
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id, feedbackRecordCount=2)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId, event_name=
'test_event', run_id=1)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1, event_type=
'informing_loop_decision', module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later', event_authored_time=clock
.CLOCK.now())
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co', email_notification_sent=0, sample_id=
'sample_test', results_type='hdr',
genomic_set_member_id=gen_member.id)
self.data_generator.create_database_genomic_appointment(
message_record_id=i, appointment_id=i, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(), source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id, participant_id=
participant.participantId, module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now())
self.data_generator.create_genomic_result_viewed(participant_id
=participant.participantId, event_type='result_viewed',
event_authored_time=clock.CLOCK.now(), module_type=
'gem', sample_id=gen_member.sampleId)
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = ['genomic_set', 'genomic_set_member',
'genomic_job_run', 'genomic_file_processed',
'genomic_gc_validation_metrics', 'genomic_manifest_file',
'genomic_manifest_feedback', 'genomic_informing_loop',
'genomic_cvl_results_past_due', 'user_event_metrics',
'genomic_member_report_state', 'genomic_result_viewed',
'genomic_appointment_event']
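# mock_cloud_task.call_count is cumulative across the test: one call from the
# first (empty) run plus one rebuild task per affected table.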
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = {obj[0][0]['table'] for obj in call_args}
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertEqual(sorted(mock_tables), sorted(affected_tables))
self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def test_reconcile_message_broker_results_ready(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
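# Expected report states: three PGx "result ready" records plus one
# informative (positive) and one uninformative HDR record, five in total.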
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
hdr_record_uninf = [rec for rec in states if rec.
genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0
]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
<mask token>
<mask token>
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {'event': 'appointment_updated',
'eventAuthoredTime': '2022-09-16T17:18:38Z',
'participantId': f'P{summary.participantId}', 'messageBody':
{'module_type': 'hdr', 'appointment_timestamp':
'2022-09-19T19:30:00+00:00', 'id': 55,
'appointment_timezone': 'America/Los_Angeles', 'location':
'CA', 'contact_number': '18043704252', 'language': 'en',
'source': 'Color'}}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId, appointment_event=
json.dumps(missing_json, indent=4) if num % 2 != 0 else
'foo', file_path='test_file_path', module_type='hdr',
event_authored_time=fake_date, event_type=
'appointment_updated' if num % 2 != 0 else
'appointment_scheduled')
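# Even-numbered participants already have an appointment event; for
# odd-numbered ones the event exists only as JSON inside the metric and
# must be created by reconciliation.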
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in
current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE
) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.
APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type ==
'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in
scheduled))
updated = list(filter(lambda x: x.event_type ==
'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in
updated))
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in
current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for
obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [
'[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
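# Participants 0 and 1 keep GROR consent of 1; participants 2 and 3 get
# other values, so two appointments should be flagged as GROR-changed.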
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=gror)
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date,
source='Color', appointment_timestamp=format_datetime(clock
.CLOCK.now()), appointment_timezone='America/Los_Angeles',
location='123 address st', contact_number='17348675309',
language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED
) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=2)
self.data_generator.create_database_genomic_appointment(
message_record_id=5, appointment_id=5, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date, source=
'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(
)), appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
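# pids[4] already has a successful notification recorded, so it is excluded;
# pids[5]'s notification was never sent, so it is still eligible for escalation.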
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=14))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
<mask token>
<mask token>
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
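# An ERROR result should be recorded but must not trigger the downstream
# manifest-generation cloud task.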
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertIsNotNone(last_job_run_status)
self.assertEqual(last_job_run_status[0], GenomicSubProcessResult.ERROR)
self.assertFalse(cloud_task_mock.called)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertIsNotNone(last_job_run_status)
self.assertEqual(last_job_run_status[0], GenomicSubProcessResult.SUCCESS)
self.assertTrue(cloud_task_mock.called)
self.assertEqual(cloud_task_mock.call_args[1].get('payload').get(
'manifest_type'), 'p0')
self.assertEqual(cloud_task_mock.call_args[1].get('task_queue'),
'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.
SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in
all_job_runs))
|
<mask token>
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = '1' * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={'text': incident_message})
<mask token>
def test_gvcf_files_ingestion_create_incident(self):
bucket_name = 'test_bucket'
file_path = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'
)
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='111111111', sampleId=
'222222222222', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=gen_job_run.id,
startTime=clock.CLOCK.now(), filePath='/test_file_path',
bucketName=bucket_name, fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
gen_processed_file.id)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name
)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.
UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message,
'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'
)
<mask token>
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
ids_should_be_updated = []
for i in range(4):
ids_should_be_updated.append(self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set.id,
biobankId='100153482', sampleId='21042005280', genomeType=
'test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if
i & 2 == 0 else 'N').id)
for i in range(2):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_array', genomicWorkflowState=
GenomicWorkflowState.AW0, ai_an='N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.
blockResearch == 1, created_members))
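# Expected: AI/AN members are research-blocked with reason 'aian' (results
# unblocked); investigation genome types are blocked for both research and
# results with reason 'test_sample_swap'; aou_array members stay unblocked.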
self.assertEqual(sorted(ids_should_be_updated), sorted(obj.id for obj in
blocklisted))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in created_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 0 and obj.
blockResearchReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
for i in range(4):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='test_investigation_one' if i & 2 != 0 else
'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in modified_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(test_file,
bucket_name, sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST
) as controller:
controller.ingest_metrics_file(metric_type='user_events',
file_path=test_file_path)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].
split('P')[-1]) == pid, metrics_to_ingest['rows']))
participant_ingested_metrics = list(filter(lambda x: x.
participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(
participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in
participant_ingested_metrics))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_reconcile_pdr_data(self, mock_cloud_task):
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.
model_type.__tablename__)
self.assertIsInstance(call_args[0].args[0]['ids'], list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in
first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1))
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=first_run[
0].id, startTime=clock.CLOCK.now(), filePath=
f'test_file_path_{i}', bucketName='test_bucket',
fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id)
manifest = (self.data_generator.
create_database_genomic_manifest_file(manifestTypeId=2,
filePath=f'test_file_path_{i}'))
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id, feedbackRecordCount=2)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId, event_name=
'test_event', run_id=1)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1, event_type=
'informing_loop_decision', module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later', event_authored_time=clock
.CLOCK.now())
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co', email_notification_sent=0, sample_id=
'sample_test', results_type='hdr',
genomic_set_member_id=gen_member.id)
self.data_generator.create_database_genomic_appointment(
message_record_id=i, appointment_id=i, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(), source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id, participant_id=
participant.participantId, module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now())
self.data_generator.create_genomic_result_viewed(participant_id
=participant.participantId, event_type='result_viewed',
event_authored_time=clock.CLOCK.now(), module_type=
'gem', sample_id=gen_member.sampleId)
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = ['genomic_set', 'genomic_set_member',
'genomic_job_run', 'genomic_file_processed',
'genomic_gc_validation_metrics', 'genomic_manifest_file',
'genomic_manifest_feedback', 'genomic_informing_loop',
'genomic_cvl_results_past_due', 'user_event_metrics',
'genomic_member_report_state', 'genomic_result_viewed',
'genomic_appointment_event']
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = {obj[0][0]['table'] for obj in call_args}
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertEqual(sorted(mock_tables), sorted(affected_tables))
self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def test_reconcile_message_broker_results_ready(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
hdr_record_uninf = [rec for rec in states if rec.
genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0
]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
<mask token>
def test_ingest_appointment_metrics_file(self):
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
'Genomic-Metrics-File-Appointment-Events-Test.json')
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode('utf-8'))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST
) as controller:
controller.ingest_appointment_metrics_file(file_path=test_file_path
)
all_metrics = self.appointment_metrics_dao.get_all()
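# The JSON fixture yields five metric records spread across the four test
# participants created above.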
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))
self.assertTrue(all(obj.file_path == test_file_path for obj in
all_metrics))
self.assertTrue(all(obj.appointment_event is not None for obj in
all_metrics))
self.assertTrue(all(obj.created is not None for obj in all_metrics))
self.assertTrue(all(obj.modified is not None for obj in all_metrics))
self.assertTrue(all(obj.module_type is not None for obj in all_metrics)
)
self.assertTrue(all(obj.event_authored_time is not None for obj in
all_metrics))
self.assertTrue(all(obj.event_type is not None for obj in all_metrics))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.
APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {'event': 'appointment_updated',
'eventAuthoredTime': '2022-09-16T17:18:38Z',
'participantId': f'P{summary.participantId}', 'messageBody':
{'module_type': 'hdr', 'appointment_timestamp':
'2022-09-19T19:30:00+00:00', 'id': 55,
'appointment_timezone': 'America/Los_Angeles', 'location':
'CA', 'contact_number': '18043704252', 'language': 'en',
'source': 'Color'}}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId, appointment_event=
json.dumps(missing_json, indent=4) if num % 2 != 0 else
'foo', file_path='test_file_path', module_type='hdr',
event_authored_time=fake_date, event_type=
'appointment_updated' if num % 2 != 0 else
'appointment_scheduled')
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in
current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE
) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.
APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type ==
'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in
scheduled))
updated = list(filter(lambda x: x.event_type ==
'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in
updated))
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in
current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for
obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [
'[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=gror)
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date,
source='Color', appointment_timestamp=format_datetime(clock
.CLOCK.now()), appointment_timezone='America/Los_Angeles',
location='123 address st', contact_number='17348675309',
language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED
) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=2)
self.data_generator.create_database_genomic_appointment(
message_record_id=5, appointment_id=5, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date, source=
'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(
)), appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=14))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
<mask token>
<mask token>
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertIsNotNone(last_job_run_status)
self.assertEqual(last_job_run_status[0], GenomicSubProcessResult.ERROR)
self.assertFalse(cloud_task_mock.called)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertIsNotNone(last_job_run_status)
self.assertEqual(last_job_run_status[0], GenomicSubProcessResult.SUCCESS)
self.assertTrue(cloud_task_mock.called)
self.assertEqual(cloud_task_mock.call_args[1].get('payload').get(
'manifest_type'), 'p0')
self.assertEqual(cloud_task_mock.call_args[1].get('task_queue'),
'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.
SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in
all_job_runs))
|
<mask token>
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = '1' * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={'text': incident_message})
<mask token>
def test_gvcf_files_ingestion_create_incident(self):
bucket_name = 'test_bucket'
file_path = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'
)
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='111111111', sampleId=
'222222222222', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=gen_job_run.id,
startTime=clock.CLOCK.now(), filePath='/test_file_path',
bucketName=bucket_name, fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
gen_processed_file.id)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name
)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.
UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message,
'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'
)
<mask token>
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
ids_should_be_updated = []
for i in range(4):
ids_should_be_updated.append(self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set.id,
biobankId='100153482', sampleId='21042005280', genomeType=
'test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if
i & 2 == 0 else 'N').id)
for i in range(2):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_array', genomicWorkflowState=
GenomicWorkflowState.AW0, ai_an='N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.
blockResearch == 1, created_members))
self.assertEqual(sorted(ids_should_be_updated), sorted(obj.id for obj in
blocklisted))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in created_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 0 and obj.
blockResearchReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
for i in range(4):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='test_investigation_one' if i & 2 != 0 else
'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in modified_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(test_file,
bucket_name, sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST
) as controller:
controller.ingest_metrics_file(metric_type='user_events',
file_path=test_file_path)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].
split('P')[-1]) == pid, metrics_to_ingest['rows']))
participant_ingested_metrics = list(filter(lambda x: x.
participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(
participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in
participant_ingested_metrics))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_reconcile_pdr_data(self, mock_cloud_task):
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.
model_type.__tablename__)
self.assertIsInstance(call_args[0].args[0]['ids'], list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in
first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1))
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=first_run[
0].id, startTime=clock.CLOCK.now(), filePath=
f'test_file_path_{i}', bucketName='test_bucket',
fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id)
manifest = (self.data_generator.
create_database_genomic_manifest_file(manifestTypeId=2,
filePath=f'test_file_path_{i}'))
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id, feedbackRecordCount=2)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId, event_name=
'test_event', run_id=1)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1, event_type=
'informing_loop_decision', module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later', event_authored_time=clock
.CLOCK.now())
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co', email_notification_sent=0, sample_id=
'sample_test', results_type='hdr',
genomic_set_member_id=gen_member.id)
self.data_generator.create_database_genomic_appointment(
message_record_id=i, appointment_id=i, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(), source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id, participant_id=
participant.participantId, module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now())
self.data_generator.create_genomic_result_viewed(participant_id
=participant.participantId, event_type='result_viewed',
event_authored_time=clock.CLOCK.now(), module_type=
'gem', sample_id=gen_member.sampleId)
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = ['genomic_set', 'genomic_set_member',
'genomic_job_run', 'genomic_file_processed',
'genomic_gc_validation_metrics', 'genomic_manifest_file',
'genomic_manifest_feedback', 'genomic_informing_loop',
'genomic_cvl_results_past_due', 'user_event_metrics',
'genomic_member_report_state', 'genomic_result_viewed',
'genomic_appointment_event']
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = {obj[0][0]['table'] for obj in call_args}
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertEqual(sorted(mock_tables), sorted(affected_tables))
self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
bucket_name = 'test-bucket'
aw1_file_name = (
'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')
aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'
aw2_file_name = (
'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')
aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=
clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)
aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.
SUCCESS)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
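# No raw manifest records exist yet, so there are no ingestion deltas to retry.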
self.data_generator.create_database_genomic_aw1_raw(file_path=
aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=
'A10001')
self.data_generator.create_database_genomic_aw2_raw(file_path=
aw2_manifest_path, biobank_id='A10001', sample_id='100001',
biobankidsampleid='A10001_100001')
aw1_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1, filePath=
aw1_manifest_path, fileName=aw1_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw2_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2, filePath=
aw2_manifest_path, fileName=aw2_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw1_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw1_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',
bucketName=bucket_name, fileName=aw1_file_name))
aw2_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw2_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',
bucketName=bucket_name, fileName=aw2_file_name))
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='100153482', sampleId=
'21042005280', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
aw2_file_processed.id)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task',
'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertTrue(all(obj in cloud_task_endpoint for obj in mock_endpoint))
mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
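# test_calculate_informing_loop_ready_flags: members eligible for the
# informing loop get their ready flags set in batches; the first run
# (no CALCULATE_READY_FLAG_LIMIT configured) leaves every flag unset,
# and once the limit is overridden to 2 each run flags two more members
# until none remain.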
def test_calculate_informing_loop_ready_flags(self):
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = (self.data_generator.
create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1))
stored_sample = (self.data_generator.
create_database_biobank_stored_sample(biobankId=summary
.biobankId, biobankOrderIdentifier=self.fake.pyint()))
collection_site = self.data_generator.create_database_site(
siteType='Clinic')
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId, participantId=
summary.participantId, finalizedTime=plus_num)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='1')
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='2')
member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, participantId=summary.participantId, genomeType=
config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood', collectionTubeId=
stored_sample.biobankStoredSampleId))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id, sexConcordance='True',
drcFpConcordance='Pass', drcSexConcordance='Pass',
processingStatus='Pass')
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for
obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [
calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.
informingLoopReadyFlag == 1 and obj.
informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), 0)
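# test_getting_results_withdrawn: EARLY_OUT participants who already
# have GEM or HDR result job runs are collected into
# genomic_result_withdrawals and trigger one notification email per
# module type; a second run finds nothing new and ends in NO_RESULTS.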
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=
gen_job_run.id if num % 2 == 0 else None)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=
gen_job_run.id)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,
'[email protected]')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in
call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in
call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in
all_withdrawal_records))
array_results = list(filter(lambda x: x.array_results == 1,
all_withdrawal_records))
self.assertEqual(len(array_results), 2)
cvl_results = list(filter(lambda x: x.cvl_results == 1,
all_withdrawal_records))
self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
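# test_gem_results_to_report_state: the GEM_RESULT_REPORTS job is a
# no-op until members reach GEM_RPT_READY with an A2 manifest run id,
# then it writes one genomic_member_report_state row per ready member
# and returns to NO_RESULTS on the following run.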
def test_gem_results_to_report_state(self):
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, participantId=summary.
participantId, genomeType=config.GENOME_TYPE_ARRAY)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = (GenomicWorkflowState.
GEM_RPT_READY)
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in
current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in
current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state == GenomicReportState.
GEM_RPT_READY for obj in current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state_str ==
GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states))
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
<mask token>
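# test_reconcile_message_broker_results_ready: pgx and hdr
# result_ready user event metrics are reconciled into member report
# states (PGX_RPT_READY, HDR_RPT_POSITIVE for 'informative',
# HDR_RPT_UNINFORMATIVE for 'uninformative'), carrying the sample id,
# authored time, and originating metric id.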
def test_reconcile_message_broker_results_ready(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs'
)
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
hdr_record_uninf = [rec for rec in states if rec.
genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0
]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
<mask token>
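# test_ingest_appointment_metrics_file: a JSON appointment-events file
# is staged in cloud storage and ingested; each event lands in
# genomic_appointment_event_metrics with its payload, module type, and
# authored time, and the job run completes with SUCCESS.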
def test_ingest_appointment_metrics_file(self):
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
'Genomic-Metrics-File-Appointment-Events-Test.json')
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode('utf-8'))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST
) as controller:
controller.ingest_appointment_metrics_file(file_path=test_file_path
)
all_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))
self.assertTrue(all(obj.file_path == test_file_path for obj in
all_metrics))
self.assertTrue(all(obj.appointment_event is not None for obj in
all_metrics))
self.assertTrue(all(obj.created is not None for obj in all_metrics))
self.assertTrue(all(obj.modified is not None for obj in all_metrics))
self.assertTrue(all(obj.module_type is not None for obj in all_metrics)
)
self.assertTrue(all(obj.event_authored_time is not None for obj in
all_metrics))
self.assertTrue(all(obj.event_type is not None for obj in all_metrics))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.
APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
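# test_reconcile_appointments_with_metrics: metrics rows whose events
# are missing from genomic_appointment_event are replayed from their
# stored message bodies; afterwards every metric is stamped with the
# reconcile job run id.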
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {'event': 'appointment_updated',
'eventAuthoredTime': '2022-09-16T17:18:38Z',
'participantId': f'P{summary.participantId}', 'messageBody':
{'module_type': 'hdr', 'appointment_timestamp':
'2022-09-19T19:30:00+00:00', 'id': 55,
'appointment_timezone': 'America/Los_Angeles', 'location':
'CA', 'contact_number': '18043704252', 'language': 'en',
'source': 'Color'}}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId, appointment_event=
json.dumps(missing_json, indent=4) if num % 2 != 0 else
'foo', file_path='test_file_path', module_type='hdr',
event_authored_time=fake_date, event_type=
'appointment_updated' if num % 2 != 0 else
'appointment_scheduled')
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in
current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE
) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.
APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type ==
'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in
scheduled))
updated = list(filter(lambda x: x.event_type ==
'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in
updated))
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in
current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for
obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
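# test_check_appointments_gror_changed: scheduled appointments for
# participants whose GROR consent is no longer 1 are reported in a
# single notification email and recorded as notified, so only newly
# changed participants show up on the next query.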
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [
'[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=gror)
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date,
source='Color', appointment_timestamp=format_datetime(clock
.CLOCK.now()), appointment_timezone='America/Los_Angeles',
location='123 address st', contact_number='17348675309',
language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED
) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=2)
self.data_generator.create_database_genomic_appointment(
message_record_id=5, appointment_id=5, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date, source=
'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(
)), appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(1, len(changed_ppts))
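# test_check_gcr_14day_escalation: HDR-positive report states older
# than 14 days with no active appointment (none scheduled, or the last
# one cancelled) are escalated by email, skipping participants already
# marked message_sent in the notified table.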
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=14))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
<mask token>
<mask token>
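# test_execute_auto_generation_from_last_run: manifest auto-generation
# is only triggered from a successful run -- an ERROR result queues no
# cloud task, while SUCCESS queues 'genomic-generate-manifest' with a
# 'p0' manifest_type payload.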
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR
)
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.
SUCCESS)
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(
'manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==
'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.
SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in
all_job_runs))
|
<mask token>
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = '1' * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={'text': incident_message})
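# test_gvcf_files_ingestion: ingesting the .gvcf.gz.md5sum and then the
# .gvcf.gz data file populates gvcfMd5Path and gvcfPath on the member's
# GC validation metrics with the full bucket-qualified paths.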
def test_gvcf_files_ingestion(self):
job_controller = GenomicJobController(job_id=38)
bucket_name = 'test_bucket'
file_path = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'
)
file_path_md5 = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum'
)
full_path = f'{bucket_name}/{file_path}'
full_path_md5 = f'{bucket_name}/{file_path_md5}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='100153482', sampleId=
'21042005280', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=gen_job_run.id,
startTime=clock.CLOCK.now(), filePath='/test_file_path',
bucketName='test_bucket', fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
gen_processed_file.id)
job_controller.ingest_data_files_into_gc_metrics(file_path_md5,
bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfMd5Path)
self.assertEqual(metrics.gvcfMd5Path, full_path_md5)
job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name
)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfPath)
self.assertEqual(metrics.gvcfPath, full_path)
def test_gvcf_files_ingestion_create_incident(self):
bucket_name = 'test_bucket'
file_path = (
'Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz'
)
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='111111111', sampleId=
'222222222222', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=gen_job_run.id,
startTime=clock.CLOCK.now(), filePath='/test_file_path',
bucketName=bucket_name, fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
gen_processed_file.id)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name
)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.
UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message,
'INGEST_DATA_FILES: Cannot find genomics metric record for sample id: 21042005280'
)
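# test_accession_data_files: idat, vcf.gz, and cram raw data files are
# accessioned into genomic_gc_data_file rows, with the gc site, file
# prefix, file type, and identifier (chipwellbarcode or sample_id)
# parsed from each path.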
def test_accession_data_files(self):
test_bucket_baylor = 'fake-data-bucket-baylor'
test_idat_file = (
'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat'
)
test_vcf_file = (
'fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz'
)
test_cram_file = (
'fake-data-bucket-baylor/Wgs_sample_raw_data/CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram'
)
test_files = [test_idat_file, test_vcf_file, test_cram_file]
test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)
with clock.FakeClock(test_time):
for file_path in test_files:
with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES
) as controller:
controller.accession_data_files(file_path,
test_bucket_baylor)
inserted_files = self.data_file_dao.get_all()
expected_idat = GenomicGcDataFile(id=1, created=test_time, modified
=test_time, file_path=test_idat_file, gc_site_id='jh',
bucket_name='fake-data-bucket-baylor', file_prefix=
'Genotyping_sample_raw_data', file_name=
'204027270091_R02C01_Grn.idat', file_type='Grn.idat',
identifier_type='chipwellbarcode', identifier_value=
'204027270091_R02C01', ignore_flag=0)
expected_vcf = GenomicGcDataFile(id=2, created=test_time, modified=
test_time, file_path=test_vcf_file, gc_site_id='jh',
bucket_name='fake-data-bucket-baylor', file_prefix=
'Genotyping_sample_raw_data', file_name=
'204027270091_R02C01.vcf.gz', file_type='vcf.gz',
identifier_type='chipwellbarcode', identifier_value=
'204027270091_R02C01', ignore_flag=0)
expected_cram = GenomicGcDataFile(id=3, created=test_time, modified
=test_time, file_path=test_cram_file, gc_site_id='bcm',
bucket_name='fake-data-bucket-baylor', file_prefix=
'Wgs_sample_raw_data/CRAMs_CRAIs', file_name=
'BCM_A100134256_21063006771_SIA0017196_1.cram', file_type=
'cram', identifier_type='sample_id', identifier_value=
'21063006771', ignore_flag=0)
expected_objs = {0: expected_idat, 1: expected_vcf, 2: expected_cram}
for i in range(3):
self.assertEqual(expected_objs[i].bucket_name, inserted_files[i
].bucket_name)
self.assertEqual(expected_objs[i].created, inserted_files[i].
created)
self.assertEqual(expected_objs[i].file_name, inserted_files[i].
file_name)
self.assertEqual(expected_objs[i].file_path, inserted_files[i].
file_path)
self.assertEqual(expected_objs[i].file_prefix, inserted_files[i
].file_prefix)
self.assertEqual(expected_objs[i].file_type, inserted_files[i].
file_type)
self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i]
.gc_site_id)
self.assertEqual(expected_objs[i].id, inserted_files[i].id)
self.assertEqual(expected_objs[i].identifier_type,
inserted_files[i].identifier_type)
self.assertEqual(expected_objs[i].identifier_value,
inserted_files[i].identifier_value)
self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i
].ignore_flag)
self.assertEqual(expected_objs[i].metadata, inserted_files[i].
metadata)
self.assertEqual(expected_objs[i].modified, inserted_files[i].
modified)
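# test_updating_members_blocklists: AW0/AW1 members are blocklisted by
# rule -- ai_an == 'Y' blocks research with reason 'aian', and the
# 'test_investigation_one' genome type blocks both research and results
# with reason 'test_sample_swap', while aou_array members stay
# unblocked.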
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
ids_should_be_updated = []
for i in range(4):
ids_should_be_updated.append(self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set.id,
biobankId='100153482', sampleId='21042005280', genomeType=
'test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0, ai_an='Y' if
i & 2 == 0 else 'N').id)
for i in range(2):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_array', genomicWorkflowState=
GenomicWorkflowState.AW0, ai_an='N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.
blockResearch == 1, created_members))
self.assertEqual(sorted(ids_should_be_updated), sorted([obj.id for obj in
blocklisted]))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in created_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in created_members if obj.genomeType ==
'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResearch == 0 and obj.
blockResearchReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in created_members if obj.
genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0))
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
for i in range(4):
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, biobankId='100153482', sampleId='21042005280',
genomeType='test_investigation_one' if i & 2 != 0 else
'aou_wgs', genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N')
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS
) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'aian' for obj in modified_members if obj.ai_an == 'Y' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 0 and obj.
blockResultsReason is None for obj in modified_members if obj.
ai_an == 'Y' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResearch == 1 and obj.
blockResearchReason is not None and obj.blockResearchReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
self.assertTrue(all(obj.blockResults == 1 and obj.
blockResultsReason is not None and obj.blockResultsReason ==
'test_sample_swap' for obj in modified_members if obj.
genomeType == 'test_investigation_one' and obj.
genomicWorkflowState == GenomicWorkflowState.AW1))
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.
COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(test_file,
bucket_name, sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST
) as controller:
controller.ingest_metrics_file(metric_type='user_events',
file_path=test_file_path)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].
split('P')[-1]) == pid, metrics_to_ingest['rows']))
participant_ingested_metrics = list(filter(lambda x: x.
participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(
participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in
participant_ingested_metrics))
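# test_reconcile_pdr_data: the first reconcile only rebuilds
# genomic_job_run; after one record is seeded into each genomics table,
# the next run issues one rebuild cloud task per affected table.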
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_reconcile_pdr_data(self, mock_cloud_task):
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.
model_type.__tablename__)
self.assertIsInstance(call_args[0].args[0]['ids'], list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in
first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, biobankId='100153482', sampleId='21042005280',
genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1))
gen_processed_file = (self.data_generator.
create_database_genomic_file_processed(runId=first_run[
0].id, startTime=clock.CLOCK.now(), filePath=
f'test_file_path_{i}', bucketName='test_bucket',
fileName='test_file_name'))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id)
manifest = (self.data_generator.
create_database_genomic_manifest_file(manifestTypeId=2,
filePath=f'test_file_path_{i}'))
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id, feedbackRecordCount=2)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId, event_name=
'test_event', run_id=1)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1, event_type=
'informing_loop_decision', module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later', event_authored_time=clock
.CLOCK.now())
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co', email_notification_sent=0, sample_id=
'sample_test', results_type='hdr',
genomic_set_member_id=gen_member.id)
self.data_generator.create_database_genomic_appointment(
message_record_id=i, appointment_id=i, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(), source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id, participant_id=
participant.participantId, module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now())
self.data_generator.create_genomic_result_viewed(participant_id
=participant.participantId, event_type='result_viewed',
event_authored_time=clock.CLOCK.now(), module_type=
'gem', sample_id=gen_member.sampleId)
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = ['genomic_set', 'genomic_set_member',
'genomic_job_run', 'genomic_file_processed',
'genomic_gc_validation_metrics', 'genomic_manifest_file',
'genomic_manifest_feedback', 'genomic_informing_loop',
'genomic_cvl_results_past_due', 'user_event_metrics',
'genomic_member_report_state', 'genomic_result_viewed',
'genomic_appointment_event']
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertEqual(sorted(mock_tables), sorted(affected_tables))
self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
bucket_name = 'test-bucket'
aw1_file_name = (
'AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv')
aw1_manifest_path = f'{bucket_name}/{aw1_file_name}'
aw2_file_name = (
'AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv')
aw2_manifest_path = f'{bucket_name}/{aw2_file_name}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
aw1_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(), endTime=
clock.CLOCK.now(), runResult=GenomicSubProcessResult.SUCCESS)
aw2_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.METRICS_INGESTION, startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(), runResult=GenomicSubProcessResult.
SUCCESS)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
self.data_generator.create_database_genomic_aw1_raw(file_path=
aw1_manifest_path, package_id='PKG-2104-026571', biobank_id=
'A10001')
self.data_generator.create_database_genomic_aw2_raw(file_path=
aw2_manifest_path, biobank_id='A10001', sample_id='100001',
biobankidsampleid='A10001_100001')
aw1_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1, filePath=
aw1_manifest_path, fileName=aw1_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw2_manifest_file = (self.data_generator.
create_database_genomic_manifest_file(created=clock.CLOCK.now(),
modified=clock.CLOCK.now(), uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2, filePath=
aw2_manifest_path, fileName=aw2_file_name, bucketName=
bucket_name, recordCount=1, rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now()))
aw1_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw1_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw1_manifest_file.id, filePath=f'/{aw1_manifest_path}',
bucketName=bucket_name, fileName=aw1_file_name))
aw2_file_processed = (self.data_generator.
create_database_genomic_file_processed(runId=aw2_job_run.id,
startTime=clock.CLOCK.now(), genomicManifestFileId=
aw2_manifest_file.id, filePath=f'/{aw2_manifest_path}',
bucketName=bucket_name, fileName=aw2_file_name))
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, biobankId='100153482', sampleId=
'21042005280', genomeType='aou_wgs', genomicWorkflowState=
GenomicWorkflowState.AW1, aw1FileProcessedId=aw1_file_processed.id)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id, genomicFileProcessedId=
aw2_file_processed.id)
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS
) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task',
'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
self.assertTrue(all(obj in cloud_task_endpoint for obj in mock_endpoint))
mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
def test_calculate_informing_loop_ready_flags(self):
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = (self.data_generator.
create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1))
stored_sample = (self.data_generator.
create_database_biobank_stored_sample(biobankId=summary
.biobankId, biobankOrderIdentifier=self.fake.pyint()))
collection_site = self.data_generator.create_database_site(
siteType='Clinic')
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId, participantId=
summary.participantId, finalizedTime=plus_num)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='1')
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId, system='2')
member = (self.data_generator.
create_database_genomic_set_member(genomicSetId=gen_set
.id, participantId=summary.participantId, genomeType=
config.GENOME_TYPE_WGS, qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood', collectionTubeId=
stored_sample.biobankStoredSampleId))
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id, sexConcordance='True',
drcFpConcordance='Pass', drcSexConcordance='Pass',
processingStatus='Pass')
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for
obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [
calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.
informingLoopReadyFlag == 1 and obj.
informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY
) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in
current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for
obj in current_set_members))
members_for_ready_loop = (self.member_dao.
get_members_for_informing_loop_ready())
self.assertEqual(len(members_for_ready_loop), 0)
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gen_job_run = self.data_generator.create_database_genomic_job_run(jobId
=GenomicJob.AW1_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY, gemA1ManifestJobRunId=
gen_job_run.id if num % 2 == 0 else None)
self.data_generator.create_database_genomic_set_member(genomicSetId
=gen_set.id, participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS, cvlW1ilHdrJobRunId=
gen_job_run.id)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL,
'[email protected]')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in
call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in
call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in
all_withdrawal_records))
array_results = list(filter(lambda x: x.array_results == 1,
all_withdrawal_records))
self.assertEqual(len(array_results), 2)
cvl_results = list(filter(lambda x: x.cvl_results == 1,
all_withdrawal_records))
self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS
) as controller:
controller.check_results_withdrawals()
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
def test_gem_results_to_report_state(self):
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName='.', genomicSetCriteria='.', genomicSetVersion=1)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST, startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id, participantId=summary.
participantId, genomeType=config.GENOME_TYPE_ARRAY)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = (GenomicWorkflowState.
GEM_RPT_READY)
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in
current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in
current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state == GenomicReportState.
GEM_RPT_READY for obj in current_gem_report_states))
self.assertTrue(all(obj.genomic_report_state_str ==
GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states))
self.assertTrue(all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states))
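        # 4 members inserted already should not return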
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.
GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
def test_reconcile_informing_loop(self):
event_dao = UserEventMetricsDao()
event_dao.truncate()
il_dao = GenomicInformingLoopDao()
for pid in range(8):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
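        # Set up initial job run ID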
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
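        # create genomic set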
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
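        # insert set members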
for b in ['aou_array', 'aou_wgs']:
for i in range(1, 9):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
collectionTubeId=100 + i, sampleId=10 + i, genomeType=b)
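        # Set up ingested metrics data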
events = ['gem.informing_loop.started',
'gem.informing_loop.screen8_no',
'gem.informing_loop.screen8_yes', 'hdr.informing_loop.started',
'gem.informing_loop.screen3', 'pgx.informing_loop.screen8_no',
'hdr.informing_loop.screen10_no']
for p in range(4):
for i in range(len(events)):
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(), modified=clock.CLOCK.now(),
participant_id=p + 1, created_at=datetime.datetime(2021,
12, 29, 0) + datetime.timedelta(hours=i), event_name=
events[i], run_id=1, ignore_flag=0)
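        # Set up informing loop from message broker records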
decisions = [None, 'no', 'yes']
for p in range(3):
for i in range(2):
self.data_generator.create_database_genomic_informing_loop(
message_record_id=i, event_type=
'informing_loop_started' if i == 0 else
'informing_loop_decision', module_type='gem',
participant_id=p + 1, decision_value=decisions[i],
sample_id=100 + p, event_authored_time=datetime.
datetime(2021, 12, 29, 0) + datetime.timedelta(hours=i))
self.data_generator.create_database_genomic_user_event_metrics(created
=clock.CLOCK.now(), modified=clock.CLOCK.now(), participant_id=
6, created_at=datetime.datetime(2021, 12, 29, 0), event_name=
'gem.informing_loop.screen8_yes', run_id=1, ignore_flag=0)
genomic_pipeline.reconcile_informing_loop_responses()
pid_list = [1, 2, 3, 6]
new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,
module='gem')
for value in new_il_values:
self.assertEqual('yes', value.decision_value)
pid_list = [1, 2, 3, 4]
for module in ['hdr', 'pgx']:
new_il_values = il_dao.get_latest_il_for_pids(pid_list=pid_list,
module=module)
for value in new_il_values:
self.assertEqual('no', value.decision_value)
self.assertIsNotNone(value.created_from_metric_id)
def test_reconcile_message_broker_results_ready(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
                collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs')
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.result_ready', run_id=1)
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.informative', run_id=1)
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.result_ready.uninformative',
run_id=1)
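        # reconcile the result_ready user event metrics into member report states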
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == 'pgx_v1']
        hdr_record_uninf = [rec for rec in states if rec.
            genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]
hdr_record_pos = [rec for rec in states if rec.genomic_report_state ==
GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.
genomic_report_state)
self.assertEqual('PGX_RPT_READY', pgx_record.
genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.
participant_id + 10)
self.assertEqual('result_ready', pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), pgx_record.
event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual('HDR_RPT_UNINFORMATIVE', hdr_record_uninf.
genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0),
hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual('HDR_RPT_POSITIVE', hdr_record_pos.
genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.
participant_id + 10)
self.assertEqual('result_ready', hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), hdr_record_pos.
event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
def test_reconcile_message_broker_results_viewed(self):
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
self.data_generator.create_database_genomic_job_run(jobId=
GenomicJob.METRICS_FILE_INGEST, startTime=clock.CLOCK.now())
for pid in range(3):
self.data_generator.create_database_participant(participantId=1 +
pid, biobankId=1 + pid)
for i in range(1, 3):
self.data_generator.create_database_genomic_set_member(
participantId=i, genomicSetId=1, biobankId=i,
                collectionTubeId=100 + i, sampleId=10 + i, genomeType='aou_wgs')
if i == 1:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='pgx.opened_at', run_id=1)
if i == 2:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i, created_at=datetime.datetime(2022, 10,
6, 0), event_name='hdr.opened_at', run_id=1)
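        # reconcile the opened_at user event metrics into result viewed records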
genomic_cvl_pipeline.reconcile_message_broker_results_viewed()
result_viewed_dao = GenomicResultViewedDao()
results = result_viewed_dao.get_all()
self.assertEqual(2, len(results))
for record in results:
if record.participant_id == 1:
self.assertEqual('pgx_v1', record.module_type)
else:
self.assertEqual('hdr_v1', record.module_type)
self.assertEqual(int(record.sample_id), record.participant_id + 10)
self.assertEqual('result_viewed', record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 0), record.
first_viewed)
self.assertIsNotNone(record.created_from_metric_id)
def test_ingest_appointment_metrics_file(self):
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
'Genomic-Metrics-File-Appointment-Events-Test.json')
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode('utf-8'))
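        # ingest the uploaded appointment metrics file from the test bucket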
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST
) as controller:
controller.ingest_appointment_metrics_file(file_path=test_file_path
)
all_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all(obj.participant_id in pids for obj in all_metrics))
self.assertTrue(all(obj.file_path == test_file_path for obj in
all_metrics))
self.assertTrue(all(obj.appointment_event is not None for obj in
all_metrics))
self.assertTrue(all(obj.created is not None for obj in all_metrics))
self.assertTrue(all(obj.modified is not None for obj in all_metrics))
        self.assertTrue(all(obj.module_type is not None for obj in all_metrics))
self.assertTrue(all(obj.event_authored_time is not None for obj in
all_metrics))
self.assertTrue(all(obj.event_type is not None for obj in all_metrics))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.
APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult ==
GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {'event': 'appointment_updated',
'eventAuthoredTime': '2022-09-16T17:18:38Z',
'participantId': f'P{summary.participantId}', 'messageBody':
{'module_type': 'hdr', 'appointment_timestamp':
'2022-09-19T19:30:00+00:00', 'id': 55,
'appointment_timezone': 'America/Los_Angeles', 'location':
'CA', 'contact_number': '18043704252', 'language': 'en',
'source': 'Color'}}
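            # even-numbered participants already have an appointment event record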
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()
), appointment_timezone='America/Los_Angeles', location
='123 address st', contact_number='17348675309',
language='en')
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId, appointment_event=
json.dumps(missing_json, indent=4) if num % 2 != 0 else
'foo', file_path='test_file_path', module_type='hdr',
event_authored_time=fake_date, event_type=
'appointment_updated' if num % 2 != 0 else
'appointment_scheduled')
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in
current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE
) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.
APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type ==
'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in
scheduled))
updated = list(filter(lambda x: x.event_type ==
'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in
updated))
current_metrics = self.appointment_metrics_dao.get_all()
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in
current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for
obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, [
'[email protected]'])
num_participants = 4
for num in range(num_participants):
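            # GROR consent differs from 1 for the last two participants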
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=gror)
self.data_generator.create_database_genomic_appointment(
message_record_id=num, appointment_id=num, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date,
source='Color', appointment_timestamp=format_datetime(clock
.CLOCK.now()), appointment_timezone='America/Los_Angeles',
location='123 address st', contact_number='17348675309',
language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED
) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=2)
self.data_generator.create_database_genomic_appointment(
message_record_id=5, appointment_id=5, event_type=
'appointment_scheduled', module_type='hdr', participant_id=
summary.participantId, event_authored_time=fake_date, source=
'Color', appointment_timestamp=format_datetime(clock.CLOCK.now(
)), appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
changed_ppts = (self.appointment_event_dao.
get_appointments_gror_changed())
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
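        # pids[4] was already notified (message_sent=True); pids[5] has a record but no message sent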
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=14))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
<mask token>
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_ce_escalation(self, email_mock):
fake_date = parser.parse('2022-09-01T13:43:23')
fake_date2 = parser.parse('2022-09-02T14:14:00')
fake_date3 = parser.parse('2022-09-03T15:15:00')
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, [
'[email protected]'])
self.data_generator.create_database_genomic_set(genomicSetName=
'test', genomicSetCriteria='.', genomicSetVersion=1)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1, consentForGenomicsROR=1)
set_member = (self.data_generator.
create_database_genomic_set_member(participantId=summary.
participantId, genomicSetId=1, biobankId=1001,
collectionTubeId=100, sampleId=10, genomeType='aou_wgs',
participantOrigin='careevolution'))
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId, genomic_report_state=
GenomicReportState.HDR_RPT_POSITIVE, genomic_set_member_id=
set_member.id, module='hdr_v1', event_authored_time=fake_date)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=101, appointment_id=102, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[0], event_authored_time=fake_date, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=102, appointment_id=103, event_type=
'appointment_completed', module_type='hdr', participant_id=pids
[1], event_authored_time=fake_date, source='Color',
appointment_timestamp=fake_date, appointment_timezone=
'America/Los_Angeles', location='123 address st',
contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=103, appointment_id=104, event_type=
'appointment_scheduled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date2, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
self.data_generator.create_database_genomic_appointment(
message_record_id=104, appointment_id=104, event_type=
'appointment_cancelled', module_type='hdr', participant_id=pids
[2], event_authored_time=fake_date3, source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles', location=
'123 address st', contact_number='17348675309', language='en')
notified_dao = GenomicDefaultBaseDao(model_type=
GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{'participant_id': pids[4], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': True}, {'participant_id': pids[5], 'created':
clock.CLOCK.now(), 'modified': clock.CLOCK.now(),
'message_sent': False}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = (self.report_state_dao.
get_hdr_result_positive_no_appointment(num_days=30,
participant_origin='careevolution'))
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION
) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject,
'GCR Outreach 30 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch(
'rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task'
)
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
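        # an ERROR run result should not trigger the generation cloud task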
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
        self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(GenomicJob.PR_PR_WORKFLOW) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
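        # a SUCCESS run result should trigger the generation cloud task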
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(
job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.
SUCCESS)
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get(
'manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') ==
'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.
SUCCESS, GenomicSubProcessResult.ERROR] for obj in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in
all_job_runs))
|
import datetime
import json
from dateutil import parser
import mock
from python_http_client.exceptions import ForbiddenError
from rdr_service import clock, config
from rdr_service.api_util import open_cloud_file
from rdr_service.clock import FakeClock
from rdr_service.dao.database_utils import format_datetime
from rdr_service.dao.genomics_dao import GenomicGcDataFileDao, GenomicGCValidationMetricsDao, GenomicIncidentDao, \
GenomicSetMemberDao, UserEventMetricsDao, GenomicJobRunDao, GenomicResultWithdrawalsDao, \
GenomicMemberReportStateDao, GenomicAppointmentEventMetricsDao, GenomicAppointmentEventDao, GenomicResultViewedDao, \
GenomicInformingLoopDao, GenomicAppointmentEventNotifiedDao, GenomicDefaultBaseDao
from rdr_service.dao.message_broker_dao import MessageBrokenEventDataDao
from rdr_service.genomic_enums import GenomicIncidentCode, GenomicJob, GenomicWorkflowState, GenomicSubProcessResult, \
GenomicSubProcessStatus, GenomicManifestTypes, GenomicQcStatus, GenomicReportState
from rdr_service.genomic.genomic_job_components import GenomicFileIngester
from rdr_service.genomic.genomic_job_controller import GenomicJobController
from rdr_service.model.genomics import GenomicGcDataFile, GenomicIncident, GenomicSetMember, GenomicGCValidationMetrics,\
GenomicGCROutreachEscalationNotified
from rdr_service.offline.genomics import genomic_pipeline, genomic_cvl_pipeline
from rdr_service.participant_enums import WithdrawalStatus
from tests import test_data
from tests.genomics_tests.test_genomic_utils import create_ingestion_test_file
from tests.helpers.unittest_base import BaseTestCase
class GenomicJobControllerTest(BaseTestCase):
def setUp(self):
super(GenomicJobControllerTest, self).setUp()
self.data_file_dao = GenomicGcDataFileDao()
self.event_data_dao = MessageBrokenEventDataDao()
self.incident_dao = GenomicIncidentDao()
self.member_dao = GenomicSetMemberDao()
self.metrics_dao = GenomicGCValidationMetricsDao()
self.user_event_metrics_dao = UserEventMetricsDao()
self.job_run_dao = GenomicJobRunDao()
self.report_state_dao = GenomicMemberReportStateDao()
self.appointment_event_dao = GenomicAppointmentEventDao()
self.appointment_metrics_dao = GenomicAppointmentEventMetricsDao()
def test_incident_with_long_message(self):
"""Make sure the length of incident messages doesn't cause issues when recording them"""
incident_message = "1" * (GenomicIncident.message.type.length + 20)
mock_slack_handler = mock.MagicMock()
job_controller = GenomicJobController(job_id=1)
job_controller.genomic_alert_slack = mock_slack_handler
job_controller.create_incident(message=incident_message, slack=True)
# Double check that the incident was saved successfully, with part of the message
incident: GenomicIncident = self.session.query(GenomicIncident).one()
self.assertTrue(incident_message.startswith(incident.message))
# Make sure Slack received the full message
mock_slack_handler.send_message_to_webhook.assert_called_with(
message_data={
'text': incident_message
}
)
def test_gvcf_files_ingestion(self):
job_controller = GenomicJobController(job_id=38)
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
file_path_md5 = "Wgs_sample_raw_data/SS_VCF_research/" \
"BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz.md5sum"
full_path = f'{bucket_name}/{file_path}'
full_path_md5 = f'{bucket_name}/{file_path_md5}'
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
job_controller.ingest_data_files_into_gc_metrics(file_path_md5, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfMd5Path)
self.assertEqual(metrics.gvcfMd5Path, full_path_md5)
job_controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
metrics = self.metrics_dao.get_metrics_by_member_id(gen_member.id)
self.assertIsNotNone(metrics.gvcfPath)
self.assertEqual(metrics.gvcfPath, full_path)
def test_gvcf_files_ingestion_create_incident(self):
bucket_name = "test_bucket"
file_path = "Wgs_sample_raw_data/SS_VCF_research/BCM_A100153482_21042005280_SIA0013441__1.hard-filtered.gvcf.gz"
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="111111111",
sampleId="222222222222",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=gen_job_run.id,
startTime=clock.CLOCK.now(),
filePath='/test_file_path',
bucketName=bucket_name,
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
with GenomicJobController(GenomicJob.INGEST_DATA_FILES) as controller:
controller.ingest_data_files_into_gc_metrics(file_path, bucket_name)
incident = self.incident_dao.get(1)
self.assertIsNotNone(incident)
self.assertEqual(incident.code, GenomicIncidentCode.UNABLE_TO_FIND_METRIC.name)
self.assertEqual(incident.data_file_path, file_path)
self.assertEqual(incident.message, 'INGEST_DATA_FILES: Cannot find '
'genomics metric record for sample id: '
'21042005280')
def test_accession_data_files(self):
test_bucket_baylor = "fake-data-bucket-baylor"
test_idat_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01_Grn.idat"
test_vcf_file = "fake-data-bucket-baylor/Genotyping_sample_raw_data/204027270091_R02C01.vcf.gz"
test_cram_file = "fake-data-bucket-baylor/Wgs_sample_raw_data/" \
"CRAMs_CRAIs/BCM_A100134256_21063006771_SIA0017196_1.cram"
test_files = [test_idat_file, test_vcf_file, test_cram_file]
test_time = datetime.datetime(2021, 7, 9, 14, 1, 1)
# run job controller method on each file
with clock.FakeClock(test_time):
for file_path in test_files:
with GenomicJobController(GenomicJob.ACCESSION_DATA_FILES) as controller:
controller.accession_data_files(file_path, test_bucket_baylor)
inserted_files = self.data_file_dao.get_all()
# idat
expected_idat = GenomicGcDataFile(
id=1,
created=test_time,
modified=test_time,
file_path=test_idat_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01_Grn.idat',
file_type='Grn.idat',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# vcf
expected_vcf = GenomicGcDataFile(
id=2,
created=test_time,
modified=test_time,
file_path=test_vcf_file,
gc_site_id='jh',
bucket_name='fake-data-bucket-baylor',
file_prefix='Genotyping_sample_raw_data',
file_name='204027270091_R02C01.vcf.gz',
file_type='vcf.gz',
identifier_type='chipwellbarcode',
identifier_value='204027270091_R02C01',
ignore_flag=0,
)
# cram
expected_cram = GenomicGcDataFile(
id=3,
created=test_time,
modified=test_time,
file_path=test_cram_file,
gc_site_id='bcm',
bucket_name='fake-data-bucket-baylor',
file_prefix='Wgs_sample_raw_data/CRAMs_CRAIs',
file_name='BCM_A100134256_21063006771_SIA0017196_1.cram',
file_type='cram',
identifier_type='sample_id',
identifier_value='21063006771',
ignore_flag=0,
)
# obj mapping
expected_objs = {
0: expected_idat,
1: expected_vcf,
2: expected_cram
}
# verify test objects match expectations
for i in range(3):
self.assertEqual(expected_objs[i].bucket_name, inserted_files[i].bucket_name)
self.assertEqual(expected_objs[i].created, inserted_files[i].created)
self.assertEqual(expected_objs[i].file_name, inserted_files[i].file_name)
self.assertEqual(expected_objs[i].file_path, inserted_files[i].file_path)
self.assertEqual(expected_objs[i].file_prefix, inserted_files[i].file_prefix)
self.assertEqual(expected_objs[i].file_type, inserted_files[i].file_type)
self.assertEqual(expected_objs[i].gc_site_id, inserted_files[i].gc_site_id)
self.assertEqual(expected_objs[i].id, inserted_files[i].id)
self.assertEqual(expected_objs[i].identifier_type, inserted_files[i].identifier_type)
self.assertEqual(expected_objs[i].identifier_value, inserted_files[i].identifier_value)
self.assertEqual(expected_objs[i].ignore_flag, inserted_files[i].ignore_flag)
self.assertEqual(expected_objs[i].metadata, inserted_files[i].metadata)
self.assertEqual(expected_objs[i].modified, inserted_files[i].modified)
def test_updating_members_blocklists(self):
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
ids_should_be_updated = []
# for just created and wf state query and MATCHES criteria
for i in range(4):
ids_should_be_updated.append(
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='Y' if i & 2 == 0 else 'N'
).id
)
# for just created and wf state query and DOES NOT MATCH criteria
for i in range(2):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='aou_array',
genomicWorkflowState=GenomicWorkflowState.AW0,
ai_an='N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
# current config json in base_config.json
created_members = self.member_dao.get_all()
blocklisted = list(filter(lambda x: x.blockResults == 1 or x.blockResearch == 1, created_members))
        self.assertEqual(sorted(ids_should_be_updated), sorted(obj.id for obj in blocklisted))
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW0)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in created_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# should NOT be RESEARCH/RESULTS blocked
self.assertTrue(all(
obj.blockResearch == 0 and obj.blockResearchReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in created_members if obj.genomeType == 'aou_array' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW0)
)
# clear current set member records
with self.member_dao.session() as session:
session.query(GenomicSetMember).delete()
run_result = self.job_run_dao.get(1)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
# for modified data query and MATCHES criteria
for i in range(4):
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType='test_investigation_one' if i & 2 != 0 else 'aou_wgs',
genomicWorkflowState=GenomicWorkflowState.AW1,
ai_an='Y' if i & 2 == 0 else 'N'
)
with GenomicJobController(GenomicJob.UPDATE_MEMBERS_BLOCKLISTS) as controller:
controller.update_members_blocklists()
modified_members = self.member_dao.get_all()
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'aian'
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should NOT be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 0 and obj.blockResultsReason is None
for obj in modified_members if obj.ai_an == 'Y' and obj.genomicWorkflowState == GenomicWorkflowState.AW1)
)
# should be RESEARCH blocked
self.assertTrue(all(
obj.blockResearch == 1 and obj.blockResearchReason is not None and obj.blockResearchReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
# should be RESULTS blocked
self.assertTrue(all(
obj.blockResults == 1 and obj.blockResultsReason is not None and obj.blockResultsReason == 'test_sample_swap'
for obj in modified_members if obj.genomeType == 'test_investigation_one' and obj.genomicWorkflowState ==
GenomicWorkflowState.AW1)
)
run_result = self.job_run_dao.get(2)
self.assertEqual(run_result.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(run_result.runResult, GenomicSubProcessResult.SUCCESS)
def test_ingest_user_metrics_file(self):
test_file = 'Genomic-Metrics-File-User-Events-Test.csv'
bucket_name = 'test_bucket'
sub_folder = 'user_events'
pids = []
file_ingester = GenomicFileIngester()
for _ in range(2):
pid = self.data_generator.create_database_participant()
pids.append(pid.participantId)
test_metrics_file = create_ingestion_test_file(
test_file,
bucket_name,
sub_folder)
test_file_path = f'{bucket_name}/{sub_folder}/{test_metrics_file}'
with open_cloud_file(test_file_path) as csv_file:
metrics_to_ingest = file_ingester._read_data_to_ingest(csv_file)
with GenomicJobController(GenomicJob.METRICS_FILE_INGEST) as controller:
controller.ingest_metrics_file(
metric_type='user_events',
file_path=test_file_path,
)
job_run_id = controller.job_run.id
metrics = self.user_event_metrics_dao.get_all()
for pid in pids:
file_metrics = list(filter(lambda x: int(x['participant_id'].split('P')[-1]) == pid, metrics_to_ingest[
'rows']))
participant_ingested_metrics = list(filter(lambda x: x.participant_id == pid, metrics))
self.assertEqual(len(file_metrics), len(participant_ingested_metrics))
self.assertTrue(all(obj.run_id == job_run_id for obj in participant_ingested_metrics))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_reconcile_pdr_data(self, mock_cloud_task):
# init new job run in __enter__
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
cloud_task_endpoint = 'rebuild_genomic_table_records_task'
first_run = self.job_run_dao.get_all()
self.assertEqual(mock_cloud_task.call_count, 1)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 1)
self.assertEqual(call_args[0].args[0]['table'], self.job_run_dao.model_type.__tablename__)
self.assertTrue(type(call_args[0].args[0]['ids']) is list)
self.assertEqual(call_args[0].args[0]['ids'], [obj.id for obj in first_run])
self.assertEqual(call_args[0].args[1], cloud_task_endpoint)
participant = self.data_generator.create_database_participant()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
plus_ten = clock.CLOCK.now() + datetime.timedelta(minutes=10)
plus_ten = plus_ten.replace(microsecond=0)
with FakeClock(plus_ten):
for i in range(2):
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1
)
gen_processed_file = self.data_generator.create_database_genomic_file_processed(
runId=first_run[0].id,
startTime=clock.CLOCK.now(),
filePath=f'test_file_path_{i}',
bucketName='test_bucket',
fileName='test_file_name',
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=gen_processed_file.id
)
manifest = self.data_generator.create_database_genomic_manifest_file(
manifestTypeId=2,
filePath=f'test_file_path_{i}'
)
self.data_generator.create_database_genomic_manifest_feedback(
inputManifestFileId=manifest.id,
feedbackRecordCount=2
)
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=participant.participantId,
event_name='test_event',
run_id=1,
)
self.data_generator.create_database_genomic_informing_loop(
message_record_id=1,
event_type='informing_loop_decision',
module_type='gem',
participant_id=participant.participantId,
decision_value='maybe_later',
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_database_genomic_cvl_past_due(
cvl_site_id='co',
email_notification_sent=0,
sample_id='sample_test',
results_type='hdr',
genomic_set_member_id=gen_member.id
)
self.data_generator.create_database_genomic_appointment(
message_record_id=i,
appointment_id=i,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=participant.participantId,
event_authored_time=clock.CLOCK.now(),
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_member_report_state(
genomic_set_member_id=gen_member.id,
participant_id=participant.participantId,
module='gem',
genomic_report_state=GenomicReportState.GEM_RPT_READY,
event_authored_time=clock.CLOCK.now()
)
self.data_generator.create_genomic_result_viewed(
participant_id=participant.participantId,
event_type='result_viewed',
event_authored_time=clock.CLOCK.now(),
module_type='gem',
sample_id=gen_member.sampleId
)
# gets new records that were created with last job run from above
with GenomicJobController(GenomicJob.RECONCILE_PDR_DATA) as controller:
controller.reconcile_pdr_data()
affected_tables = [
'genomic_set',
'genomic_set_member',
'genomic_job_run',
'genomic_file_processed',
'genomic_gc_validation_metrics',
'genomic_manifest_file',
'genomic_manifest_feedback',
'genomic_informing_loop',
'genomic_cvl_results_past_due',
'user_event_metrics',
'genomic_member_report_state',
'genomic_result_viewed',
'genomic_appointment_event'
]
num_calls = len(affected_tables) + 1
self.assertEqual(mock_cloud_task.call_count, num_calls)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), num_calls)
mock_tables = set([obj[0][0]['table'] for obj in call_args])
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertEqual(sorted(mock_tables), sorted(affected_tables))
        self.assertTrue(all(obj == cloud_task_endpoint for obj in mock_endpoint))
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_retry_manifest_ingestions_if_deltas(self, mock_cloud_task):
bucket_name = "test-bucket"
aw1_file_name = "AW1_wgs_sample_manifests/RDR_AoU_SEQ_PKG-2104-026571.csv"
aw1_manifest_path = f"{bucket_name}/{aw1_file_name}"
aw2_file_name = "AW2_wgs_data_manifests/RDR_AoU_SEQ_DataManifest_04092021.csv"
aw2_manifest_path = f"{bucket_name}/{aw2_file_name}"
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
# Create AW1 job_run
aw1_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# Create AW2 job_run
aw2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_INGESTION,
startTime=clock.CLOCK.now(),
endTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
# should have no data
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(3)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# Create genomic_aw1_raw record
self.data_generator.create_database_genomic_aw1_raw(
file_path=aw1_manifest_path,
package_id="PKG-2104-026571",
biobank_id="A10001",
)
# Create genomic_aw2_raw record
self.data_generator.create_database_genomic_aw2_raw(
file_path=aw2_manifest_path,
biobank_id="A10001",
sample_id="100001",
biobankidsampleid="A10001_100001",
)
# Create AW1 genomic_manifest_file record
aw1_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW1,
filePath=aw1_manifest_path,
fileName=aw1_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW2 genomic_manifest_file record
aw2_manifest_file = self.data_generator.create_database_genomic_manifest_file(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
uploadDate=clock.CLOCK.now(),
manifestTypeId=GenomicManifestTypes.AW2,
filePath=aw2_manifest_path,
fileName=aw2_file_name,
bucketName=bucket_name,
recordCount=1,
rdrProcessingComplete=1,
rdrProcessingCompleteDate=clock.CLOCK.now(),
)
# Create AW1 file_processed
aw1_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw1_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw1_manifest_file.id,
filePath=f"/{aw1_manifest_path}",
bucketName=bucket_name,
fileName=aw1_file_name,
)
# Create AW2 file_processed
aw2_file_processed = self.data_generator.create_database_genomic_file_processed(
runId=aw2_job_run.id,
startTime=clock.CLOCK.now(),
genomicManifestFileId=aw2_manifest_file.id,
filePath=f"/{aw2_manifest_path}",
bucketName=bucket_name,
fileName=aw2_file_name,
)
# genomic_set_member for AW1
gen_member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
biobankId="100153482",
sampleId="21042005280",
genomeType="aou_wgs",
genomicWorkflowState=GenomicWorkflowState.AW1,
aw1FileProcessedId=aw1_file_processed.id
)
# genomic_gc_validation_metrics for AW1
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=gen_member.id,
genomicFileProcessedId=aw2_file_processed.id
)
# one AW1/AW2 with no deltas
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(4)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.NO_FILES)
self.assertEqual(mock_cloud_task.call_count, 0)
self.assertFalse(mock_cloud_task.call_count)
# empty tables resulting in deltas and cloud task calls
with self.member_dao.session() as session:
session.query(GenomicGCValidationMetrics).delete()
session.query(GenomicSetMember).delete()
with GenomicJobController(GenomicJob.RETRY_MANIFEST_INGESTIONS) as controller:
controller.retry_manifest_ingestions()
job_run = self.job_run_dao.get(5)
self.assertEqual(job_run.jobId, GenomicJob.RETRY_MANIFEST_INGESTIONS)
self.assertEqual(job_run.runStatus, GenomicSubProcessStatus.COMPLETED)
self.assertEqual(job_run.runResult, GenomicSubProcessResult.SUCCESS)
# one AW1/AW2 with deltas
self.assertEqual(mock_cloud_task.call_count, 2)
self.assertTrue(mock_cloud_task.call_count)
call_args = mock_cloud_task.call_args_list
self.assertEqual(len(call_args), 2)
cloud_task_endpoint = ['ingest_aw1_manifest_task', 'ingest_aw2_manifest_task']
mock_endpoint = [obj[0][1] for obj in call_args]
        self.assertTrue(all(obj in cloud_task_endpoint for obj in mock_endpoint))
mock_buckets = set([obj[0][0]['bucket_name'] for obj in call_args])
        self.assertEqual(len(mock_buckets), 1)
self.assertTrue(list(mock_buckets)[0] == bucket_name)
def test_calculate_informing_loop_ready_flags(self):
num_participants = 4
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
for num in range(num_participants):
plus_num = clock.CLOCK.now() + datetime.timedelta(minutes=num)
plus_num = plus_num.replace(microsecond=0)
with FakeClock(plus_num):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
stored_sample = self.data_generator.create_database_biobank_stored_sample(
biobankId=summary.biobankId,
biobankOrderIdentifier=self.fake.pyint()
)
collection_site = self.data_generator.create_database_site(
siteType='Clinic'
)
order = self.data_generator.create_database_biobank_order(
collectedSiteId=collection_site.siteId,
participantId=summary.participantId,
finalizedTime=plus_num
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="1",
)
self.data_generator.create_database_biobank_order_identifier(
value=stored_sample.biobankOrderIdentifier,
biobankOrderId=order.biobankOrderId,
system="2",
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
qcStatus=GenomicQcStatus.PASS,
gcManifestSampleSource='Whole Blood',
collectionTubeId=stored_sample.biobankStoredSampleId
)
self.data_generator.create_database_genomic_gc_validation_metrics(
genomicSetMemberId=member.id,
sexConcordance='True',
drcFpConcordance='Pass',
drcSexConcordance='Pass',
processingStatus='Pass'
)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 0 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is None for obj in current_set_members))
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
# no config object, controller method should return
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants)
calculation_limit = 2
config.override_setting(config.CALCULATE_READY_FLAG_LIMIT, [calculation_limit])
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(any(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(any(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
current_loops_set = [obj for obj in current_set_members if obj.informingLoopReadyFlag == 1
and obj.informingLoopReadyFlagModified is not None]
self.assertEqual(len(current_loops_set), calculation_limit)
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), num_participants // 2)
with GenomicJobController(GenomicJob.CALCULATE_INFORMING_LOOP_READY) as controller:
controller.calculate_informing_loop_ready_flags()
current_set_members = self.member_dao.get_all()
self.assertTrue(all(obj.informingLoopReadyFlag == 1 for obj in current_set_members))
self.assertTrue(all(obj.informingLoopReadyFlagModified is not None for obj in current_set_members))
members_for_ready_loop = self.member_dao.get_members_for_informing_loop_ready()
self.assertEqual(len(members_for_ready_loop), 0)
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_getting_results_withdrawn(self, email_mock):
num_participants = 4
result_withdrawal_dao = GenomicResultWithdrawalsDao()
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gen_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.AW1_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
pids = []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY,
gemA1ManifestJobRunId=gen_job_run.id if num % 2 == 0 else None
)
self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_WGS,
cvlW1ilHdrJobRunId=gen_job_run.id
)
pids.append(summary.participantId)
config.override_setting(config.RDR_GENOMICS_NOTIFICATION_EMAIL, '[email protected]')
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
# mock checks should be two => 1 GEM 1 HEALTH
self.assertEqual(email_mock.call_count, 2)
call_args = email_mock.call_args_list
self.assertTrue(any('GEM' in call.args[0].subject for call in call_args))
self.assertTrue(any('HEALTH' in call.args[0].subject for call in call_args))
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
all_withdrawal_records = result_withdrawal_dao.get_all()
self.assertTrue(len(all_withdrawal_records) == len(pids))
self.assertTrue(all(obj.participant_id in pids for obj in all_withdrawal_records))
array_results = list(filter(lambda x: x.array_results == 1, all_withdrawal_records))
# should only be 2
        self.assertEqual(len(array_results), 2)
cvl_results = list(filter(lambda x: x.cvl_results == 1, all_withdrawal_records))
# should be 4 for num of participants
        self.assertEqual(len(cvl_results), num_participants)
with GenomicJobController(GenomicJob.RESULTS_PIPELINE_WITHDRAWALS) as controller:
controller.check_results_withdrawals()
# mock checks should still be two on account of no records
self.assertEqual(email_mock.call_count, 2)
job_runs = self.job_run_dao.get_all()
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.RESULTS_PIPELINE_WITHDRAWALS, job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
def test_gem_results_to_report_state(self):
num_participants = 8
gen_set = self.data_generator.create_database_genomic_set(
genomicSetName=".",
genomicSetCriteria=".",
genomicSetVersion=1
)
gem_a2_job_run = self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.GEM_A2_MANIFEST,
startTime=clock.CLOCK.now(),
runResult=GenomicSubProcessResult.SUCCESS
)
pids_to_update, member_ids = [], []
for num in range(num_participants):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1,
withdrawalStatus=WithdrawalStatus.EARLY_OUT
)
member = self.data_generator.create_database_genomic_set_member(
genomicSetId=gen_set.id,
participantId=summary.participantId,
genomeType=config.GENOME_TYPE_ARRAY
)
if num % 2 == 0:
member_ids.append(member.id)
pids_to_update.append(summary.participantId)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 2)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[0]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
current_members = self.member_dao.get_all()
        # the 4 correctly updated members should be returned
for member in current_members:
if member.participantId in pids_to_update:
member.gemA2ManifestJobRunId = gem_a2_job_run.id
member.genomicWorkflowState = GenomicWorkflowState.GEM_RPT_READY
self.member_dao.update(member)
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 3)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[1]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
current_gem_report_states = self.report_state_dao.get_all()
self.assertEqual(len(current_gem_report_states), len(pids_to_update))
self.assertTrue(all(obj.event_type == 'result_ready' for obj in current_gem_report_states))
self.assertTrue(all(obj.event_authored_time is not None for obj in current_gem_report_states))
self.assertTrue(all(obj.module == 'gem' for obj in current_gem_report_states))
self.assertTrue(
all(obj.genomic_report_state == GenomicReportState.GEM_RPT_READY for obj in current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_report_state_str == GenomicReportState.GEM_RPT_READY.name for obj in
current_gem_report_states)
)
self.assertTrue(
all(obj.genomic_set_member_id in member_ids for obj in
current_gem_report_states)
)
        # the 4 members already inserted should not be returned
with GenomicJobController(GenomicJob.GEM_RESULT_REPORTS) as controller:
controller.gem_results_to_report_state()
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 4)
current_job_run = list(filter(lambda x: x.jobId == GenomicJob.GEM_RESULT_REPORTS, current_job_runs))[2]
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.NO_RESULTS)
self.clear_table_after_test('genomic_member_report_state')
def test_reconcile_informing_loop(self):
event_dao = UserEventMetricsDao()
event_dao.truncate() # for test suite
il_dao = GenomicInformingLoopDao()
for pid in range(8):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# insert set members
for b in ["aou_array", "aou_wgs"]:
for i in range(1, 9):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType=b,
)
# Set up ingested metrics data
events = ['gem.informing_loop.started',
'gem.informing_loop.screen8_no',
'gem.informing_loop.screen8_yes',
'hdr.informing_loop.started',
'gem.informing_loop.screen3',
'pgx.informing_loop.screen8_no',
'hdr.informing_loop.screen10_no']
for p in range(4):
for i in range(len(events)):
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=p + 1,
created_at=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i),
event_name=events[i],
run_id=1,
ignore_flag=0,
)
# Set up informing loop from message broker records
decisions = [None, 'no', 'yes']
for p in range(3):
for i in range(2):
self.data_generator.create_database_genomic_informing_loop(
message_record_id=i,
event_type='informing_loop_started' if i == 0 else 'informing_loop_decision',
module_type='gem',
participant_id=p + 1,
decision_value=decisions[i],
sample_id=100 + p,
event_authored_time=datetime.datetime(2021, 12, 29, 00) + datetime.timedelta(hours=i)
)
# Test for no message but yes user event
self.data_generator.create_database_genomic_user_event_metrics(
created=clock.CLOCK.now(),
modified=clock.CLOCK.now(),
participant_id=6,
created_at=datetime.datetime(2021, 12, 29, 00),
event_name='gem.informing_loop.screen8_yes',
run_id=1,
ignore_flag=0,
)
# Run reconcile job
genomic_pipeline.reconcile_informing_loop_responses()
# Test mismatched GEM data ingested correctly
pid_list = [1, 2, 3, 6]
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module="gem"
)
for value in new_il_values:
self.assertEqual("yes", value.decision_value)
pid_list = [1, 2, 3, 4]
for module in ["hdr", "pgx"]:
new_il_values = il_dao.get_latest_il_for_pids(
pid_list=pid_list,
module=module
)
for value in new_il_values:
self.assertEqual("no", value.decision_value)
self.assertIsNotNone(value.created_from_metric_id)
def test_reconcile_message_broker_results_ready(self):
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(7):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 6):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 3 PGX records
if i < 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.result_ready",
run_id=1,
)
# 1 HDR Positive
if i == 4:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.informative",
run_id=1,
)
# 1 HDR uninformative
if i == 5:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.result_ready.uninformative",
run_id=1,
)
# Run job
genomic_cvl_pipeline.reconcile_message_broker_results_ready()
# Test correct data inserted
report_state_dao = GenomicMemberReportStateDao()
states = report_state_dao.get_all()
self.assertEqual(5, len(states))
pgx_records = [rec for rec in states if rec.module == "pgx_v1"]
hdr_record_uninf = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_UNINFORMATIVE][0]
hdr_record_pos = [rec for rec in states
if rec.genomic_report_state == GenomicReportState.HDR_RPT_POSITIVE][0]
for pgx_record in pgx_records:
self.assertEqual(GenomicReportState.PGX_RPT_READY, pgx_record.genomic_report_state)
self.assertEqual("PGX_RPT_READY", pgx_record.genomic_report_state_str)
self.assertEqual(int(pgx_record.sample_id), pgx_record.participant_id + 10)
self.assertEqual("result_ready", pgx_record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), pgx_record.event_authored_time)
self.assertIsNotNone(pgx_record.created_from_metric_id)
self.assertEqual("HDR_RPT_UNINFORMATIVE", hdr_record_uninf.genomic_report_state_str)
self.assertEqual(int(hdr_record_uninf.sample_id), hdr_record_uninf.participant_id + 10)
self.assertEqual("result_ready", hdr_record_uninf.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_uninf.event_authored_time)
self.assertIsNotNone(hdr_record_uninf.created_from_metric_id)
self.assertEqual("HDR_RPT_POSITIVE", hdr_record_pos.genomic_report_state_str)
self.assertEqual(int(hdr_record_pos.sample_id), hdr_record_pos.participant_id + 10)
self.assertEqual("result_ready", hdr_record_pos.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), hdr_record_pos.event_authored_time)
self.assertIsNotNone(hdr_record_pos.created_from_metric_id)
def test_reconcile_message_broker_results_viewed(self):
# Create Test Participants' data
# create genomic set
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
# Set up initial job run ID
self.data_generator.create_database_genomic_job_run(
jobId=GenomicJob.METRICS_FILE_INGEST,
startTime=clock.CLOCK.now()
)
for pid in range(3):
self.data_generator.create_database_participant(participantId=1 + pid, biobankId=1 + pid)
# insert set members and event metrics records
for i in range(1, 3):
self.data_generator.create_database_genomic_set_member(
participantId=i,
genomicSetId=1,
biobankId=i,
collectionTubeId=100 + i,
sampleId=10 + i,
genomeType="aou_wgs",
)
# 1 PGX Viewed
if i == 1:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="pgx.opened_at",
run_id=1,
)
# 1 HDR Viewed
if i == 2:
self.data_generator.create_database_genomic_user_event_metrics(
participant_id=i,
created_at=datetime.datetime(2022, 10, 6, 00),
event_name="hdr.opened_at",
run_id=1,
)
genomic_cvl_pipeline.reconcile_message_broker_results_viewed()
# Test correct data inserted
result_viewed_dao = GenomicResultViewedDao()
results = result_viewed_dao.get_all()
self.assertEqual(2, len(results))
for record in results:
if record.participant_id == 1:
self.assertEqual("pgx_v1", record.module_type)
else:
self.assertEqual("hdr_v1", record.module_type)
self.assertEqual(int(record.sample_id), record.participant_id + 10)
self.assertEqual("result_viewed", record.event_type)
self.assertEqual(datetime.datetime(2022, 10, 6, 00), record.first_viewed)
self.assertIsNotNone(record.created_from_metric_id)
def test_ingest_appointment_metrics_file(self):
test_file = 'Genomic-Metrics-File-Appointment-Events-Test.json'
bucket_name = 'test_bucket'
sub_folder = 'appointment_events'
pids = []
for _ in range(4):
summary = self.data_generator.create_database_participant_summary()
pids.append(summary.participantId)
test_file_path = f'{bucket_name}/{sub_folder}/{test_file}'
appointment_data = test_data.load_test_data_json(
"Genomic-Metrics-File-Appointment-Events-Test.json")
appointment_data_str = json.dumps(appointment_data, indent=4)
with open_cloud_file(test_file_path, mode='wb') as cloud_file:
cloud_file.write(appointment_data_str.encode("utf-8"))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_FILE_INGEST) as controller:
controller.ingest_appointment_metrics_file(
file_path=test_file_path,
)
all_metrics = self.appointment_metrics_dao.get_all()
        # should be 5 metric records, matching what's in the JSON file
self.assertEqual(len(all_metrics), 5)
self.assertTrue(all((obj.participant_id in pids for obj in all_metrics)))
self.assertTrue(all((obj.file_path == test_file_path for obj in all_metrics)))
self.assertTrue(all((obj.appointment_event is not None for obj in all_metrics)))
self.assertTrue(all((obj.created is not None for obj in all_metrics)))
self.assertTrue(all((obj.modified is not None for obj in all_metrics)))
self.assertTrue(all((obj.module_type is not None for obj in all_metrics)))
self.assertTrue(all((obj.event_authored_time is not None for obj in all_metrics)))
self.assertTrue(all((obj.event_type is not None for obj in all_metrics)))
current_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(current_job_runs), 1)
current_job_run = current_job_runs[0]
self.assertTrue(current_job_run.jobId == GenomicJob.APPOINTMENT_METRICS_FILE_INGEST)
self.assertTrue(current_job_run.runResult == GenomicSubProcessResult.SUCCESS)
self.clear_table_after_test('genomic_appointment_event_metrics')
def test_reconcile_appointments_with_metrics(self):
fake_date = parser.parse('2020-05-29T08:00:01-05:00')
for num in range(4):
summary = self.data_generator.create_database_participant_summary()
missing_json = {
"event": "appointment_updated",
"eventAuthoredTime": "2022-09-16T17:18:38Z",
"participantId": f'P{summary.participantId}',
"messageBody": {
"module_type": "hdr",
"appointment_timestamp": "2022-09-19T19:30:00+00:00",
"id": 55,
"appointment_timezone": "America/Los_Angeles",
"location": "CA",
"contact_number": "18043704252",
"language": "en",
"source": "Color"
}
}
if num % 2 == 0:
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment_metric(
participant_id=summary.participantId,
appointment_event=json.dumps(missing_json, indent=4) if num % 2 != 0 else 'foo',
file_path='test_file_path',
module_type='hdr',
event_authored_time=fake_date,
event_type='appointment_updated' if num % 2 != 0 else 'appointment_scheduled'
)
current_events = self.appointment_event_dao.get_all()
# should be 2 initial appointment events
self.assertEqual(len(current_events), 2)
current_metrics = self.appointment_metrics_dao.get_all()
        # should be 4 initial appointment metrics
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is None for obj in current_metrics))
with GenomicJobController(GenomicJob.APPOINTMENT_METRICS_RECONCILE) as controller:
controller.reconcile_appointment_events_from_metrics()
job_run = self.job_run_dao.get_all()
self.assertEqual(len(job_run), 1)
self.assertTrue(job_run[0].jobId == GenomicJob.APPOINTMENT_METRICS_RECONCILE)
current_events = self.appointment_event_dao.get_all()
# should be 4 appointment events 2 initial + 2 added
self.assertEqual(len(current_events), 4)
scheduled = list(filter(lambda x: x.event_type == 'appointment_scheduled', current_events))
self.assertEqual(len(scheduled), 2)
self.assertTrue(all(obj.created_from_metric_id is None for obj in scheduled))
updated = list(filter(lambda x: x.event_type == 'appointment_updated', current_events))
self.assertEqual(len(updated), 2)
self.assertTrue(all(obj.created_from_metric_id is not None for obj in updated))
current_metrics = self.appointment_metrics_dao.get_all()
        # should STILL be 4 appointment metrics
self.assertEqual(len(current_metrics), 4)
self.assertTrue(all(obj.reconcile_job_run_id is not None for obj in current_metrics))
self.assertTrue(all(obj.reconcile_job_run_id == job_run[0].id for obj in current_metrics))
self.clear_table_after_test('genomic_appointment_event_metrics')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_appointments_gror_changed(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
notified_dao = GenomicAppointmentEventNotifiedDao()
config.override_setting(config.GENOMIC_COLOR_PM_EMAIL, ['[email protected]'])
num_participants = 4
for num in range(num_participants):
gror = num if num > 1 else 1
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=gror
)
self.data_generator.create_database_genomic_appointment(
message_record_id=num,
appointment_id=num,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(2, len(changed_ppts))
with GenomicJobController(GenomicJob.CHECK_APPOINTMENT_GROR_CHANGED) as controller:
controller.check_appointments_gror_changed()
self.assertEqual(email_mock.call_count, 1)
notified_appointments = notified_dao.get_all()
self.assertEqual(2, len(notified_appointments))
# test notified not returned by query
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=2
)
self.data_generator.create_database_genomic_appointment(
message_record_id=5,
appointment_id=5,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=summary.participantId,
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
changed_ppts = self.appointment_event_dao.get_appointments_gror_changed()
self.assertEqual(1, len(changed_ppts))
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(num_days=14)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 14 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_14day_escalation_error(self, email_mock):
email_mock.side_effect = ForbiddenError(mock.Mock(code=403))
mock_slack_handler = mock.MagicMock()
fake_date = parser.parse("2023-06-01T13:43:23")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(2):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
with GenomicJobController(GenomicJob.CHECK_GCR_OUTREACH_ESCALATION) as controller:
controller.genomic_alert_slack = mock_slack_handler
controller.check_gcr_escalation(controller.job_id)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
with notified_dao.session() as session:
notification = session.query(
GenomicGCROutreachEscalationNotified
).filter(
GenomicGCROutreachEscalationNotified.participant_id == pids[0]
).one()
self.assertEqual(email_mock.call_count, 1)
self.assertEqual(mock_slack_handler.send_message_to_webhook.call_count, 1)
self.assertEqual(False, notification.message_sent)
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.services.email_service.EmailService.send_email')
def test_check_gcr_ce_escalation(self, email_mock):
fake_date = parser.parse("2022-09-01T13:43:23")
fake_date2 = parser.parse("2022-09-02T14:14:00")
fake_date3 = parser.parse("2022-09-03T15:15:00")
config.override_setting(config.GENOMIC_GCR_ESCALATION_EMAILS, ['[email protected]'])
self.data_generator.create_database_genomic_set(
genomicSetName='test',
genomicSetCriteria='.',
genomicSetVersion=1
)
pids = []
for _ in range(6):
summary = self.data_generator.create_database_participant_summary(
consentForStudyEnrollment=1,
consentForGenomicsROR=1
)
set_member = self.data_generator.create_database_genomic_set_member(
participantId=summary.participantId,
genomicSetId=1,
biobankId=1001,
collectionTubeId=100,
sampleId=10,
genomeType="aou_wgs",
participantOrigin='careevolution'
)
self.data_generator.create_database_genomic_member_report_state(
participant_id=summary.participantId,
genomic_report_state=GenomicReportState.HDR_RPT_POSITIVE,
genomic_set_member_id=set_member.id,
module='hdr_v1',
event_authored_time=fake_date
)
pids.append(summary.participantId)
# Appointment scheduled in future: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=101,
appointment_id=102,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[0],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment completed: don't notify
self.data_generator.create_database_genomic_appointment(
message_record_id=102,
appointment_id=103,
event_type='appointment_completed',
module_type='hdr',
participant_id=pids[1],
event_authored_time=fake_date,
source='Color',
appointment_timestamp=fake_date,
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
# Appointment scheduled then canceled: notify
self.data_generator.create_database_genomic_appointment(
message_record_id=103,
appointment_id=104,
event_type='appointment_scheduled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date2,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
self.data_generator.create_database_genomic_appointment(
message_record_id=104,
appointment_id=104,
event_type='appointment_cancelled',
module_type='hdr',
participant_id=pids[2],
event_authored_time=fake_date3,
source='Color',
appointment_timestamp=format_datetime(clock.CLOCK.now()),
appointment_timezone='America/Los_Angeles',
location='123 address st',
contact_number='17348675309',
language='en'
)
notified_dao = GenomicDefaultBaseDao(model_type=GenomicGCROutreachEscalationNotified)
notified_dao.insert_bulk([{
'participant_id': pids[4],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': True
},{
'participant_id': pids[5],
'created': clock.CLOCK.now(),
'modified': clock.CLOCK.now(),
'message_sent': False
}])
with clock.FakeClock(parser.parse('2022-11-1T05:15:00')):
escalated_participants = self.report_state_dao.get_hdr_result_positive_no_appointment(
num_days=30,
participant_origin='careevolution'
)
results = [pid[0] for pid in escalated_participants]
self.assertIn(pids[2], results)
self.assertIn(pids[3], results)
self.assertIn(pids[5], results)
self.assertNotIn(pids[0], results)
self.assertNotIn(pids[1], results)
self.assertNotIn(pids[4], results)
with GenomicJobController(GenomicJob.CHECK_GCR_CE_OUTREACH_ESCALATION) as controller:
controller.check_gcr_escalation(controller.job_id)
self.assertEqual(email_mock.call_count, 3)
self.assertEqual(email_mock.call_args.args[0].subject, 'GCR Outreach 30 Day Escalation')
self.clear_table_after_test('genomic_gcr_outreach_escalation_notified')
@mock.patch('rdr_service.genomic.genomic_job_controller.GenomicJobController.execute_cloud_task')
def test_execute_auto_generation_from_last_run(self, cloud_task_mock):
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.ERROR
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.ERROR)
# task SHOULD NOT be called
self.assertEqual(cloud_task_mock.called, False)
self.assertEqual(cloud_task_mock.call_count, 0)
with GenomicJobController(
GenomicJob.PR_PR_WORKFLOW
) as controller:
controller.job_result = GenomicSubProcessResult.SUCCESS
controller._end_run()
controller.execute_auto_generation_from_cloud_task()
last_job_run_status = self.job_run_dao.get_last_run_status_for_job_id(job_id=GenomicJob.PR_PR_WORKFLOW)
self.assertTrue(last_job_run_status is not None)
self.assertTrue(last_job_run_status[0] == GenomicSubProcessResult.SUCCESS)
# task SHOULD be called
self.assertEqual(cloud_task_mock.called, True)
self.assertTrue(cloud_task_mock.call_args[1].get('payload').get('manifest_type') == 'p0')
self.assertTrue(cloud_task_mock.call_args[1].get('task_queue') == 'genomic-generate-manifest')
all_job_runs = self.job_run_dao.get_all()
self.assertEqual(len(all_job_runs), 2)
self.assertTrue(all(obj.runResult in [GenomicSubProcessResult.SUCCESS, GenomicSubProcessResult.ERROR] for obj
in all_job_runs))
self.assertTrue(all(obj.jobId == GenomicJob.PR_PR_WORKFLOW for obj in all_job_runs))
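# Editor's note (hedged sketch, not part of the original suite): the corrected
# assertions above originally used the two-argument form assertTrue(len(x), 2),
# which always passes because the second argument is only a failure message.
# A minimal, self-contained illustration of the pitfall:
import unittest

class AssertTruePitfallDemo(unittest.TestCase):
    def test_two_arg_asserttrue_always_passes(self):
        values = [1]
        # Passes even though len(values) != 2: the 2 is treated as the msg argument.
        self.assertTrue(len(values), 2)
        # The equality form actually checks the count (this line would fail here):
        # self.assertEqual(len(values), 2)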
|
[
9,
13,
17,
22,
25
] |
2,169 |
e00cbe6e177ee841c6e64de842e5b8f95463b3a8
|
<mask token>
|
<mask token>
pandas2ri.activate()
<mask token>
|
<mask token>
ts = robjects.r('ts')
forecast = importr('forecast', lib_loc=
'C:/Users/sand9888/Documents/sand9888/R/win-library/3.3')
<mask token>
pandas2ri.activate()
train = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')
traindf = pd.read_csv(train, index_col=0)
traindf.index = traindf.index.to_datetime()
rdata = ts(traindf.Price.values, frequency=4)
fit = forecast.auto_arima(rdata)
forecast_output = forecast.forecast(fit, h=16, level=95.0)
|
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
ts = robjects.r('ts')
forecast = importr('forecast', lib_loc=
'C:/Users/sand9888/Documents/sand9888/R/win-library/3.3')
import os
import pandas as pd
from rpy2.robjects import pandas2ri
pandas2ri.activate()
train = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')
traindf = pd.read_csv(train, index_col=0)
traindf.index = traindf.index.to_datetime()
rdata = ts(traindf.Price.values, frequency=4)
fit = forecast.auto_arima(rdata)
forecast_output = forecast.forecast(fit, h=16, level=95.0)
|
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
ts=robjects.r('ts')
forecast = importr("forecast", lib_loc = "C:/Users/sand9888/Documents/sand9888/R/win-library/3.3")
import os
import pandas as pd
from rpy2.robjects import pandas2ri
pandas2ri.activate()
train = os.path.join('C:/DAT203.3x/Lab01/cadairydata.csv')
traindf=pd.read_csv(train, index_col=0)
traindf.index=traindf.index.to_datetime()
rdata=ts(traindf.Price.values,frequency=4)
fit=forecast.auto_arima(rdata)
forecast_output=forecast.forecast(fit,h=16,level=(95.0))
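# A hedged follow-up sketch (not in the original source): pulling the point
# forecasts out of the returned R `forecast` object into Python. The component
# name 'mean' is the standard slot on R forecast objects; the numpy conversion
# is an assumption about the rpy2 version in use.
import numpy as np
point_forecasts = np.asarray(forecast_output.rx2('mean'))  # 16 quarterly point forecasts
print(point_forecasts)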
|
[
0,
1,
2,
3,
4
] |
2,170 |
188f82b0fb04d6814d77617fa9148113d0e6ef01
|
<mask token>
class Model(nn.Module):
<mask token>
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
<mask token>
def infer_batch(self, input_seq, logger):
"""
model inference.
        The given data can be in the form of a batch or a single instance
"""
return self.forward(input_seq, None)
|
<mask token>
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,
**kw):
super(Model, self).__init__()
fc_embedding = []
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.
pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(
hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer,
False, True, bidirectional=is_bidir)
self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(
is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(
hidden_size, hidden_size // step), nn.Linear(hidden_size //
step, 1))
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
<mask token>
def infer_batch(self, input_seq, logger):
"""
model inference.
        The given data can be in the form of a batch or a single instance
"""
return self.forward(input_seq, None)
|
<mask token>
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,
**kw):
super(Model, self).__init__()
fc_embedding = []
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.
pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(
hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer,
False, True, bidirectional=is_bidir)
self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(
is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(
hidden_size, hidden_size // step), nn.Linear(hidden_size //
step, 1))
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
def train_batch(self, input_seq, target_seq, category, optimizer, logger):
"""
doc:
train the model with given data and optimizer, return log info
param:
input_seq: torch.LongTensor, [batch, max_seq_len]
target_seq: torch.LongTensor, [batch, max_seq_len]
optimizer: optimizer object
logger: logger object
"""
seq_pred = self.forward(input_seq, target_seq)
loss = self._loss_fn(seq_pred, target_seq)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), seq_pred
def infer_batch(self, input_seq, logger):
"""
model inference.
        The given data can be in the form of a batch or a single instance
"""
return self.forward(input_seq, None)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False,
**kw):
super(Model, self).__init__()
fc_embedding = []
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.
pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(
hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer,
False, True, bidirectional=is_bidir)
self.decoder = nn.Sequential(nn.Linear(encoder_layer * (int(
is_bidir) + 1) * hidden_size, hidden_size), nn.Linear(
hidden_size, hidden_size // step), nn.Linear(hidden_size //
step, 1))
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.
shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
def train_batch(self, input_seq, target_seq, category, optimizer, logger):
"""
doc:
train the model with given data and optimizer, return log info
param:
input_seq: torch.LongTensor, [batch, max_seq_len]
target_seq: torch.LongTensor, [batch, max_seq_len]
optimizer: optimizer object
logger: logger object
"""
seq_pred = self.forward(input_seq, target_seq)
loss = self._loss_fn(seq_pred, target_seq)
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), seq_pred
def infer_batch(self, input_seq, logger):
"""
model inference.
        The given data can be in the form of a batch or a single instance
"""
return self.forward(input_seq, None)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, hidden_size, encoder_layer=2, step=4, is_bidir=False, **kw):
super(Model, self).__init__()
fc_embedding = []
# First, we should convert the 1 dim data to a higher dim
for i in range(int(math.log(hidden_size, step))):
fc_embedding.append(nn.Linear(int(math.pow(step, i)), int(math.pow(step, i + 1))))
fc_embedding.append(nn.Linear(int(math.pow(step, int(math.log(hidden_size, step)))), hidden_size))
self.fc_embedding = nn.Sequential(*fc_embedding)
self.encoder = nn.GRU(hidden_size, hidden_size, encoder_layer, False, True,
bidirectional=is_bidir)
self.decoder = nn.Sequential(
nn.Linear(encoder_layer * (int(is_bidir) + 1) * hidden_size, hidden_size),
nn.Linear(hidden_size, hidden_size // step),
nn.Linear(hidden_size // step, 1),
)
def forward(self, input_seq, target_seq=None):
input_seq = self.fc_embedding(input_seq.unsqueeze(-1))
_, encoding_result = self.encoder(input_seq)
encoding_result = torch.transpose(encoding_result, 0, 1).contiguous()
encoding_result = torch.reshape(encoding_result, [encoding_result.shape[0], encoding_result.shape[1] * encoding_result.shape[2]])
seq_pred = self.decoder(encoding_result)
return seq_pred.squeeze(1)
def _loss_fn(self, seq_pred, target_seq):
return F.mse_loss(seq_pred, target_seq)
def train_batch(self, input_seq, target_seq, category, optimizer, logger):
"""
doc:
train the model with given data and optimizer, return log info
param:
input_seq: torch.LongTensor, [batch, max_seq_len]
target_seq: torch.LongTensor, [batch, max_seq_len]
optimizer: optimizer object
logger: logger object
"""
seq_pred = self.forward(input_seq, target_seq)
loss = self._loss_fn(seq_pred, target_seq)
# optimize
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss.item(), seq_pred
def infer_batch(self, input_seq, logger):
"""
model inference.
        The given data can be in the form of a batch or a single instance
"""
return self.forward(input_seq, None)
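# A minimal usage sketch (shapes are assumptions, not from the original source):
# fc_embedding lifts each scalar timestep from 1 dim to hidden_size in powers of
# `step`, so input_seq is a float tensor of shape [batch, seq_len].
if __name__ == '__main__':
    model = Model(hidden_size=64, encoder_layer=2, step=4, is_bidir=False)
    dummy = torch.randn(8, 20)            # batch of 8 sequences, 20 timesteps each
    pred = model.infer_batch(dummy, logger=None)
    print(pred.shape)                     # torch.Size([8]): one prediction per sequence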
|
[
4,
5,
6,
7,
8
] |
2,171 |
9d22a90835f5cf293808ab359244fe1bde81f3e1
|
<mask token>
|
<mask token>
for ticker in tickers:
params = urlencode([('market', market), ('em', tickers[ticker]), (
'code', ticker), ('apply', 0), ('df', start_date.day), ('mf',
start_date.month - 1), ('yf', start_date.year), ('from', start_date
), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',
end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +
start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),
('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',
1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])
url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +
'.csv?' + params)
    print('Requesting Finam at URL: ' + url)
txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()
local_file = open(f'{ticker}_{start}_{end}.txt', 'w')
for line in txt:
local_file.write(line.strip().decode('utf-8') + '\n')
local_file.close()
print('Done. Check the per-ticker .txt files in the folder containing the script')
|
<mask token>
period = 7
start = '01.01.2021'
end = '10.06.2021'
periods = {'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min':
6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}
tickers = {'ABRD': 82460, 'AESL': 181867, 'AFKS': 19715, 'AFLT': 29, 'AGRO':
399716, 'AKRN': 17564, 'ALBK': 82616, 'ALNU': 81882, 'ALRS': 81820,
'AMEZ': 20702, 'APTK': 13855, 'AQUA': 35238, 'ARMD': 19676, 'ARSA':
19915, 'ASSB': 16452, 'AVAN': 82843, 'AVAZ': 39, 'AVAZP': 40, 'BANE':
81757, 'BANEP': 81758, 'BGDE': 175840, 'BISV': 35242, 'BISVP': 35243,
'BLNG': 21078, 'BRZL': 81901, 'BSPB': 20066, 'CBOM': 420694, 'CHEP':
20999, 'CHGZ': 81933, 'CHKZ': 21000, 'CHMF': 16136, 'CHMK': 21001,
'CHZN': 19960, 'CLSB': 16712, 'CLSBP': 16713, 'CNTL': 21002, 'CNTLP':
81575, 'DASB': 16825, 'DGBZ': 17919, 'DIOD': 35363, 'DIXY': 18564,
'DVEC': 19724, 'DZRD': 74744, 'DZRDP': 74745, 'ELTZ': 81934, 'ENRU':
16440, 'EPLN': 451471, 'ERCO': 81935, 'FEES': 20509, 'FESH': 20708,
'FORTP': 82164, 'GAZA': 81997, 'GAZAP': 81998, 'GAZC': 81398, 'GAZP':
16842, 'GAZS': 81399, 'GAZT': 82115, 'GCHE': 20125, 'GMKN': 795, 'GRAZ':
16610, 'GRNT': 449114, 'GTLC': 152876, 'GTPR': 175842, 'GTSS': 436120,
'HALS': 17698, 'HIMC': 81939, 'HIMCP': 81940, 'HYDR': 20266, 'IDJT':
388276, 'IDVP': 409486, 'IGST': 81885, 'IGST03': 81886, 'IGSTP': 81887,
'IRAO': 20516, 'IRGZ': 9, 'IRKT': 15547, 'ISKJ': 17137, 'JNOS': 15722,
'JNOSP': 15723, 'KAZT': 81941, 'KAZTP': 81942, 'KBSB': 19916, 'KBTK':
35285, 'KCHE': 20030, 'KCHEP': 20498, 'KGKC': 83261, 'KGKCP': 152350,
'KLSB': 16329, 'KMAZ': 15544, 'KMEZ': 22525, 'KMTZ': 81903, 'KOGK':
20710, 'KRKN': 81891, 'KRKNP': 81892, 'KRKO': 81905, 'KRKOP': 81906,
'KROT': 510, 'KROTP': 511, 'KRSB': 20912, 'KRSBP': 20913, 'KRSG': 15518,
'KSGR': 75094, 'KTSB': 16284, 'KTSBP': 16285, 'KUBE': 522, 'KUNF':
81943, 'KUZB': 83165, 'KZMS': 17359, 'KZOS': 81856, 'KZOSP': 81857,
'LIFE': 74584, 'LKOH': 8, 'LNTA': 385792, 'LNZL': 21004, 'LNZLP': 22094,
'LPSB': 16276, 'LSNG': 31, 'LSNGP': 542, 'LSRG': 19736, 'LVHK': 152517,
'MAGE': 74562, 'MAGEP': 74563, 'MAGN': 16782, 'MERF': 20947, 'MFGS': 30,
'MFGSP': 51, 'MFON': 152516, 'MGNT': 17086, 'MGNZ': 20892, 'MGTS':
12984, 'MGTSP': 12983, 'MGVM': 81829, 'MISB': 16330, 'MISBP': 16331,
'MNFD': 80390, 'MOBB': 82890, 'MOEX': 152798, 'MORI': 81944, 'MOTZ':
21116, 'MRKC': 20235, 'MRKK': 20412, 'MRKP': 20107, 'MRKS': 20346,
'MRKU': 20402, 'MRKV': 20286, 'MRKY': 20681, 'MRKZ': 20309, 'MRSB':
16359, 'MSNG': 6, 'MSRS': 16917, 'MSST': 152676, 'MSTT': 74549, 'MTLR':
21018, 'MTLRP': 80745, 'MTSS': 15523, 'MUGS': 81945, 'MUGSP': 81946,
'MVID': 19737, 'NAUK': 81992, 'NFAZ': 81287, 'NKHP': 450432, 'NKNC':
20100, 'NKNCP': 20101, 'NKSH': 81947, 'NLMK': 17046, 'NMTP': 19629,
'NNSB': 16615, 'NNSBP': 16616, 'NPOF': 81858, 'NSVZ': 81929, 'NVTK':
17370, 'ODVA': 20737, 'OFCB': 80728, 'OGKB': 18684, 'OMSH': 22891,
'OMZZP': 15844, 'OPIN': 20711, 'OSMP': 21006, 'OTCP': 407627, 'PAZA':
81896, 'PHOR': 81114, 'PHST': 19717, 'PIKK': 18654, 'PLSM': 81241,
'PLZL': 17123, 'PMSB': 16908, 'PMSBP': 16909, 'POLY': 175924, 'PRFN':
83121, 'PRIM': 17850, 'PRIN': 22806, 'PRMB': 80818, 'PRTK': 35247,
'PSBR': 152320, 'QIWI': 181610, 'RASP': 17713, 'RBCM': 74779, 'RDRB':
181755, 'RGSS': 181934, 'RKKE': 20321, 'RLMN': 152677, 'RLMNP': 388313,
'RNAV': 66644, 'RODNP': 66693, 'ROLO': 181316, 'ROSB': 16866, 'ROSN':
17273, 'ROST': 20637, 'RSTI': 20971, 'RSTIP': 20972, 'RTGZ': 152397,
'RTKM': 7, 'RTKMP': 15, 'RTSB': 16783, 'RTSBP': 16784, 'RUAL': 414279,
'RUALR': 74718, 'RUGR': 66893, 'RUSI': 81786, 'RUSP': 20712, 'RZSB':
16455, 'SAGO': 445, 'SAGOP': 70, 'SARE': 11, 'SAREP': 24, 'SBER': 3,
'SBERP': 23, 'SELG': 81360, 'SELGP': 82610, 'SELL': 21166, 'SIBG':
436091, 'SIBN': 2, 'SKYC': 83122, 'SNGS': 4, 'SNGSP': 13, 'STSB': 20087,
'STSBP': 20088, 'SVAV': 16080, 'SYNG': 19651, 'SZPR': 22401, 'TAER':
80593, 'TANL': 81914, 'TANLP': 81915, 'TASB': 16265, 'TASBP': 16266,
'TATN': 825, 'TATNP': 826, 'TGKA': 18382, 'TGKB': 17597, 'TGKBP': 18189,
'TGKD': 18310, 'TGKDP': 18391, 'TGKN': 18176, 'TGKO': 81899, 'TNSE':
420644, 'TORS': 16797, 'TORSP': 16798, 'TRCN': 74561, 'TRMK': 18441,
'TRNFP': 1012, 'TTLK': 18371, 'TUCH': 74746, 'TUZA': 20716, 'UCSS':
175781, 'UKUZ': 20717, 'UNAC': 22843, 'UNKL': 82493, 'UPRO': 18584,
'URFD': 75124, 'URKA': 19623, 'URKZ': 82611, 'USBN': 81953, 'UTAR':
15522, 'UTII': 81040, 'UTSY': 419504, 'UWGN': 414560, 'VDSB': 16352,
'VGSB': 16456, 'VGSBP': 16457, 'VJGZ': 81954, 'VJGZP': 81955, 'VLHZ':
17257, 'VRAO': 20958, 'VRAOP': 20959, 'VRSB': 16546, 'VRSBP': 16547,
'VSMO': 15965, 'VSYD': 83251, 'VSYDP': 83252, 'VTBR': 19043, 'VTGK':
19632, 'VTRS': 82886, 'VZRZ': 17068, 'VZRZP': 17067, 'WTCM': 19095,
'WTCMP': 19096, 'YAKG': 81917, 'YKEN': 81766, 'YKENP': 81769, 'YNDX':
388383, 'YRSB': 16342, 'YRSBP': 16343, 'ZHIV': 181674, 'ZILL': 81918,
'ZMZN': 556, 'ZMZNP': 603, 'ZVEZ': 82001}
FINAM_URL = 'http://export.finam.ru/'
market = 0
start_date = datetime.strptime(start, '%d.%m.%Y').date()
start_date_rev = datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')
end_date = datetime.strptime(end, '%d.%m.%Y').date()
end_date_rev = datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')
for ticker in tickers:
params = urlencode([('market', market), ('em', tickers[ticker]), (
'code', ticker), ('apply', 0), ('df', start_date.day), ('mf',
start_date.month - 1), ('yf', start_date.year), ('from', start_date
), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',
end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +
start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),
('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',
1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])
url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +
'.csv?' + params)
    print('Requesting Finam at URL: ' + url)
txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()
local_file = open(f'{ticker}_{start}_{end}.txt', 'w')
for line in txt:
local_file.write(line.strip().decode('utf-8') + '\n')
local_file.close()
print('Done. Check the per-ticker .txt files in the folder containing the script')
|
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from datetime import datetime
period = 7
start = '01.01.2021'
end = '10.06.2021'
periods = {'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min':
6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}
tickers = {'ABRD': 82460, 'AESL': 181867, 'AFKS': 19715, 'AFLT': 29, 'AGRO':
399716, 'AKRN': 17564, 'ALBK': 82616, 'ALNU': 81882, 'ALRS': 81820,
'AMEZ': 20702, 'APTK': 13855, 'AQUA': 35238, 'ARMD': 19676, 'ARSA':
19915, 'ASSB': 16452, 'AVAN': 82843, 'AVAZ': 39, 'AVAZP': 40, 'BANE':
81757, 'BANEP': 81758, 'BGDE': 175840, 'BISV': 35242, 'BISVP': 35243,
'BLNG': 21078, 'BRZL': 81901, 'BSPB': 20066, 'CBOM': 420694, 'CHEP':
20999, 'CHGZ': 81933, 'CHKZ': 21000, 'CHMF': 16136, 'CHMK': 21001,
'CHZN': 19960, 'CLSB': 16712, 'CLSBP': 16713, 'CNTL': 21002, 'CNTLP':
81575, 'DASB': 16825, 'DGBZ': 17919, 'DIOD': 35363, 'DIXY': 18564,
'DVEC': 19724, 'DZRD': 74744, 'DZRDP': 74745, 'ELTZ': 81934, 'ENRU':
16440, 'EPLN': 451471, 'ERCO': 81935, 'FEES': 20509, 'FESH': 20708,
'FORTP': 82164, 'GAZA': 81997, 'GAZAP': 81998, 'GAZC': 81398, 'GAZP':
16842, 'GAZS': 81399, 'GAZT': 82115, 'GCHE': 20125, 'GMKN': 795, 'GRAZ':
16610, 'GRNT': 449114, 'GTLC': 152876, 'GTPR': 175842, 'GTSS': 436120,
'HALS': 17698, 'HIMC': 81939, 'HIMCP': 81940, 'HYDR': 20266, 'IDJT':
388276, 'IDVP': 409486, 'IGST': 81885, 'IGST03': 81886, 'IGSTP': 81887,
'IRAO': 20516, 'IRGZ': 9, 'IRKT': 15547, 'ISKJ': 17137, 'JNOS': 15722,
'JNOSP': 15723, 'KAZT': 81941, 'KAZTP': 81942, 'KBSB': 19916, 'KBTK':
35285, 'KCHE': 20030, 'KCHEP': 20498, 'KGKC': 83261, 'KGKCP': 152350,
'KLSB': 16329, 'KMAZ': 15544, 'KMEZ': 22525, 'KMTZ': 81903, 'KOGK':
20710, 'KRKN': 81891, 'KRKNP': 81892, 'KRKO': 81905, 'KRKOP': 81906,
'KROT': 510, 'KROTP': 511, 'KRSB': 20912, 'KRSBP': 20913, 'KRSG': 15518,
'KSGR': 75094, 'KTSB': 16284, 'KTSBP': 16285, 'KUBE': 522, 'KUNF':
81943, 'KUZB': 83165, 'KZMS': 17359, 'KZOS': 81856, 'KZOSP': 81857,
'LIFE': 74584, 'LKOH': 8, 'LNTA': 385792, 'LNZL': 21004, 'LNZLP': 22094,
'LPSB': 16276, 'LSNG': 31, 'LSNGP': 542, 'LSRG': 19736, 'LVHK': 152517,
'MAGE': 74562, 'MAGEP': 74563, 'MAGN': 16782, 'MERF': 20947, 'MFGS': 30,
'MFGSP': 51, 'MFON': 152516, 'MGNT': 17086, 'MGNZ': 20892, 'MGTS':
12984, 'MGTSP': 12983, 'MGVM': 81829, 'MISB': 16330, 'MISBP': 16331,
'MNFD': 80390, 'MOBB': 82890, 'MOEX': 152798, 'MORI': 81944, 'MOTZ':
21116, 'MRKC': 20235, 'MRKK': 20412, 'MRKP': 20107, 'MRKS': 20346,
'MRKU': 20402, 'MRKV': 20286, 'MRKY': 20681, 'MRKZ': 20309, 'MRSB':
16359, 'MSNG': 6, 'MSRS': 16917, 'MSST': 152676, 'MSTT': 74549, 'MTLR':
21018, 'MTLRP': 80745, 'MTSS': 15523, 'MUGS': 81945, 'MUGSP': 81946,
'MVID': 19737, 'NAUK': 81992, 'NFAZ': 81287, 'NKHP': 450432, 'NKNC':
20100, 'NKNCP': 20101, 'NKSH': 81947, 'NLMK': 17046, 'NMTP': 19629,
'NNSB': 16615, 'NNSBP': 16616, 'NPOF': 81858, 'NSVZ': 81929, 'NVTK':
17370, 'ODVA': 20737, 'OFCB': 80728, 'OGKB': 18684, 'OMSH': 22891,
'OMZZP': 15844, 'OPIN': 20711, 'OSMP': 21006, 'OTCP': 407627, 'PAZA':
81896, 'PHOR': 81114, 'PHST': 19717, 'PIKK': 18654, 'PLSM': 81241,
'PLZL': 17123, 'PMSB': 16908, 'PMSBP': 16909, 'POLY': 175924, 'PRFN':
83121, 'PRIM': 17850, 'PRIN': 22806, 'PRMB': 80818, 'PRTK': 35247,
'PSBR': 152320, 'QIWI': 181610, 'RASP': 17713, 'RBCM': 74779, 'RDRB':
181755, 'RGSS': 181934, 'RKKE': 20321, 'RLMN': 152677, 'RLMNP': 388313,
'RNAV': 66644, 'RODNP': 66693, 'ROLO': 181316, 'ROSB': 16866, 'ROSN':
17273, 'ROST': 20637, 'RSTI': 20971, 'RSTIP': 20972, 'RTGZ': 152397,
'RTKM': 7, 'RTKMP': 15, 'RTSB': 16783, 'RTSBP': 16784, 'RUAL': 414279,
'RUALR': 74718, 'RUGR': 66893, 'RUSI': 81786, 'RUSP': 20712, 'RZSB':
16455, 'SAGO': 445, 'SAGOP': 70, 'SARE': 11, 'SAREP': 24, 'SBER': 3,
'SBERP': 23, 'SELG': 81360, 'SELGP': 82610, 'SELL': 21166, 'SIBG':
436091, 'SIBN': 2, 'SKYC': 83122, 'SNGS': 4, 'SNGSP': 13, 'STSB': 20087,
'STSBP': 20088, 'SVAV': 16080, 'SYNG': 19651, 'SZPR': 22401, 'TAER':
80593, 'TANL': 81914, 'TANLP': 81915, 'TASB': 16265, 'TASBP': 16266,
'TATN': 825, 'TATNP': 826, 'TGKA': 18382, 'TGKB': 17597, 'TGKBP': 18189,
'TGKD': 18310, 'TGKDP': 18391, 'TGKN': 18176, 'TGKO': 81899, 'TNSE':
420644, 'TORS': 16797, 'TORSP': 16798, 'TRCN': 74561, 'TRMK': 18441,
'TRNFP': 1012, 'TTLK': 18371, 'TUCH': 74746, 'TUZA': 20716, 'UCSS':
175781, 'UKUZ': 20717, 'UNAC': 22843, 'UNKL': 82493, 'UPRO': 18584,
'URFD': 75124, 'URKA': 19623, 'URKZ': 82611, 'USBN': 81953, 'UTAR':
15522, 'UTII': 81040, 'UTSY': 419504, 'UWGN': 414560, 'VDSB': 16352,
'VGSB': 16456, 'VGSBP': 16457, 'VJGZ': 81954, 'VJGZP': 81955, 'VLHZ':
17257, 'VRAO': 20958, 'VRAOP': 20959, 'VRSB': 16546, 'VRSBP': 16547,
'VSMO': 15965, 'VSYD': 83251, 'VSYDP': 83252, 'VTBR': 19043, 'VTGK':
19632, 'VTRS': 82886, 'VZRZ': 17068, 'VZRZP': 17067, 'WTCM': 19095,
'WTCMP': 19096, 'YAKG': 81917, 'YKEN': 81766, 'YKENP': 81769, 'YNDX':
388383, 'YRSB': 16342, 'YRSBP': 16343, 'ZHIV': 181674, 'ZILL': 81918,
'ZMZN': 556, 'ZMZNP': 603, 'ZVEZ': 82001}
FINAM_URL = 'http://export.finam.ru/'
market = 0
start_date = datetime.strptime(start, '%d.%m.%Y').date()
start_date_rev = datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')
end_date = datetime.strptime(end, '%d.%m.%Y').date()
end_date_rev = datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')
for ticker in tickers:
params = urlencode([('market', market), ('em', tickers[ticker]), (
'code', ticker), ('apply', 0), ('df', start_date.day), ('mf',
start_date.month - 1), ('yf', start_date.year), ('from', start_date
), ('dt', end_date.day), ('mt', end_date.month - 1), ('yt',
end_date.year), ('to', end_date), ('p', period), ('f', ticker + '_' +
start_date_rev + '_' + end_date_rev), ('e', '.csv'), ('cn', ticker),
('dtf', 1), ('tmf', 1), ('MSOR', 0), ('mstime', 'on'), ('mstimever',
1), ('sep', 1), ('sep2', 1), ('datf', 1), ('at', 1)])
url = (FINAM_URL + ticker + '_' + start_date_rev + '_' + end_date_rev +
'.csv?' + params)
    print('Requesting Finam at URL: ' + url)
txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()
local_file = open(f'{ticker}_{start}_{end}.txt', 'w')
for line in txt:
local_file.write(line.strip().decode('utf-8') + '\n')
local_file.close()
print('Done. Check the per-ticker .txt files in the folder containing the script')
|
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from datetime import datetime
# user-defined variables
period=7 # set the timeframe. Choose from: 'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min': 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10
start = "01.01.2021" # date from which to start pulling quotes
end = "10.06.2021" # final date up to which quotes are pulled
periods={'tick': 1, 'min': 2, '5min': 3, '10min': 4, '15min': 5, '30min': 6, 'hour': 7, 'daily': 8, 'week': 9, 'month': 10}
# Finam assigns each stock a numeric code:
tickers={'ABRD':82460,'AESL':181867,'AFKS':19715,'AFLT':29,'AGRO':399716,'AKRN':17564,'ALBK':82616,'ALNU':81882,'ALRS':81820,'AMEZ':20702,'APTK':13855,'AQUA':35238,'ARMD':19676,'ARSA':19915,'ASSB':16452,'AVAN':82843,'AVAZ':39,'AVAZP':40,'BANE':81757,'BANEP':81758,'BGDE':175840,'BISV':35242,'BISVP':35243,'BLNG':21078,'BRZL':81901,'BSPB':20066,'CBOM':420694,'CHEP':20999,'CHGZ':81933,'CHKZ':21000,'CHMF':16136,'CHMK':21001,'CHZN':19960,'CLSB':16712,'CLSBP':16713,'CNTL':21002,'CNTLP':81575,'DASB':16825,'DGBZ':17919,'DIOD':35363,'DIXY':18564,'DVEC':19724,'DZRD':74744,'DZRDP':74745,'ELTZ':81934,'ENRU':16440,'EPLN':451471,'ERCO':81935,'FEES':20509,'FESH':20708,'FORTP':82164,'GAZA':81997,'GAZAP':81998,'GAZC':81398,'GAZP':16842,'GAZS':81399,'GAZT':82115,'GCHE':20125,'GMKN':795,'GRAZ':16610,'GRNT':449114,'GTLC':152876,'GTPR':175842,'GTSS':436120,'HALS':17698,'HIMC':81939,'HIMCP':81940,'HYDR':20266,'IDJT':388276,'IDVP':409486,'IGST':81885,'IGST03':81886,'IGSTP':81887,'IRAO':20516,'IRGZ':9,'IRKT':15547,'ISKJ':17137,'JNOS':15722,'JNOSP':15723,'KAZT':81941,'KAZTP':81942,'KBSB':19916,'KBTK':35285,'KCHE':20030,'KCHEP':20498,'KGKC':83261,'KGKCP':152350,'KLSB':16329,'KMAZ':15544,'KMEZ':22525,'KMTZ':81903,'KOGK':20710,'KRKN':81891,'KRKNP':81892,'KRKO':81905,'KRKOP':81906,'KROT':510,'KROTP':511,'KRSB':20912,'KRSBP':20913,'KRSG':15518,'KSGR':75094,'KTSB':16284,'KTSBP':16285,'KUBE':522,'KUNF':81943,'KUZB':83165,'KZMS':17359,'KZOS':81856,'KZOSP':81857,'LIFE':74584,'LKOH':8,'LNTA':385792,'LNZL':21004,'LNZLP':22094,'LPSB':16276,'LSNG':31,'LSNGP':542,'LSRG':19736,'LVHK':152517,'MAGE':74562,'MAGEP':74563,'MAGN':16782,'MERF':20947,'MFGS':30,'MFGSP':51,'MFON':152516,'MGNT':17086,'MGNZ':20892,'MGTS':12984,'MGTSP':12983,'MGVM':81829,'MISB':16330,'MISBP':16331,'MNFD':80390,'MOBB':82890,'MOEX':152798,'MORI':81944,'MOTZ':21116,'MRKC':20235,'MRKK':20412,'MRKP':20107,'MRKS':20346,'MRKU':20402,'MRKV':20286,'MRKY':20681,'MRKZ':20309,'MRSB':16359,'MSNG':6,'MSRS':16917,'MSST':152676,'MSTT':74549,'MTLR':21018,'MTLRP':80745,'MTSS':15523,'MUGS':81945,'MUGSP':81946,'MVID':19737,'NAUK':81992,'NFAZ':81287,'NKHP':450432,'NKNC':20100,'NKNCP':20101,'NKSH':81947,'NLMK':17046,'NMTP':19629,'NNSB':16615,'NNSBP':16616,'NPOF':81858,'NSVZ':81929,'NVTK':17370,'ODVA':20737,'OFCB':80728,'OGKB':18684,'OMSH':22891,'OMZZP':15844,'OPIN':20711,'OSMP':21006,'OTCP':407627,'PAZA':81896,'PHOR':81114,'PHST':19717,'PIKK':18654,'PLSM':81241,'PLZL':17123,'PMSB':16908,'PMSBP':16909,'POLY':175924,'PRFN':83121,'PRIM':17850,'PRIN':22806,'PRMB':80818,'PRTK':35247,'PSBR':152320,'QIWI':181610,'RASP':17713,'RBCM':74779,'RDRB':181755,'RGSS':181934,'RKKE':20321,'RLMN':152677,'RLMNP':388313,'RNAV':66644,'RODNP':66693,'ROLO':181316,'ROSB':16866,'ROSN':17273,'ROST':20637,'RSTI':20971,'RSTIP':20972,'RTGZ':152397,'RTKM':7,'RTKMP':15,'RTSB':16783,'RTSBP':16784,'RUAL':414279,'RUALR':74718,'RUGR':66893,'RUSI':81786,'RUSP':20712,'RZSB':16455,'SAGO':445,'SAGOP':70,'SARE':11,'SAREP':24,'SBER':3,'SBERP':23,'SELG':81360,'SELGP':82610,'SELL':21166,'SIBG':436091,'SIBN':2,'SKYC':83122,'SNGS':4,'SNGSP':13,'STSB':20087,'STSBP':20088,'SVAV':16080,'SYNG':19651,'SZPR':22401,'TAER':80593,'TANL':81914,'TANLP':81915, 
'TASB':16265,'TASBP':16266,'TATN':825,'TATNP':826,'TGKA':18382,'TGKB':17597,'TGKBP':18189,'TGKD':18310,'TGKDP':18391,'TGKN':18176,'TGKO':81899,'TNSE':420644,'TORS':16797,'TORSP':16798,'TRCN':74561,'TRMK':18441,'TRNFP':1012,'TTLK':18371,'TUCH':74746,'TUZA':20716,'UCSS':175781,'UKUZ':20717,'UNAC':22843,'UNKL':82493,'UPRO':18584,'URFD':75124,'URKA':19623,'URKZ':82611,'USBN':81953,'UTAR':15522,'UTII':81040,'UTSY':419504,'UWGN':414560,'VDSB':16352,'VGSB':16456,'VGSBP':16457,'VJGZ':81954,'VJGZP':81955,'VLHZ':17257,'VRAO':20958,'VRAOP':20959,'VRSB':16546,'VRSBP':16547,'VSMO':15965,'VSYD':83251,'VSYDP':83252,'VTBR':19043,'VTGK':19632,'VTRS':82886,'VZRZ':17068,'VZRZP':17067,'WTCM':19095,'WTCMP':19096,'YAKG':81917,'YKEN':81766,'YKENP':81769,'YNDX':388383,'YRSB':16342,'YRSBP':16343,'ZHIV':181674,'ZILL':81918,'ZMZN':556,'ZMZNP':603,'ZVEZ':82001}
FINAM_URL = "http://export.finam.ru/"# the server we query
market = 0 # optional. The market the security trades on. For stocks any value works; other markets were not tested.
# convert the dates:
start_date = datetime.strptime(start, "%d.%m.%Y").date()
start_date_rev=datetime.strptime(start, '%d.%m.%Y').strftime('%Y%m%d')
end_date = datetime.strptime(end, "%d.%m.%Y").date()
end_date_rev=datetime.strptime(end, '%d.%m.%Y').strftime('%Y%m%d')
for ticker in tickers:
    params = urlencode([
        ('market', market),  # market the security trades on
        ('em', tickers[ticker]),  # numeric code Finam assigns to this security
        ('code', ticker),  # our ticker symbol
        ('apply', 0),  # couldn't find out what this means
        ('df', start_date.day),  # start date: day of month (1-31)
        ('mf', start_date.month - 1),  # start date: month number (0-11)
        ('yf', start_date.year),  # start date: year
        ('from', start_date),  # start date in full
        ('dt', end_date.day),  # end date: day of month
        ('mt', end_date.month - 1),  # end date: month number
        ('yt', end_date.year),  # end date: year
        ('to', end_date),  # end date in full
        ('p', period),  # timeframe
        ('f', ticker + "_" + start_date_rev + "_" + end_date_rev),  # name of the generated file
        ('e', ".csv"),  # extension of the generated file
        ('cn', ticker),  # the ticker symbol once more
        ('dtf', 1),  # date format; one of 5 options, see https://www.finam.ru/profile/moex-akcii/sberbank/export/
        ('tmf', 1),  # time format; one of 4 options
        ('MSOR', 0),  # candle timestamp (0 - open; 1 - close)
        ('mstime', "on"),  # Moscow time
        ('mstimever', 1),  # time-zone correction
        ('sep', 1),  # field separator (1 - comma, 2 - period, 3 - semicolon, 4 - tab, 5 - space)
        ('sep2', 1),  # thousands separator
        ('datf', 1),  # file record format; one of 6 options
        ('at', 1)])  # whether column headers are included
    url = FINAM_URL + ticker + "_" + start_date_rev + "_" + end_date_rev + ".csv?" + params  # the URL is assembled!
    print("Requesting Finam at: " + url)
    ##!txt=urlopen(url).readlines()  # the huge data array returned by Finam used to be read here.
    txt = urlopen(Request(url, headers={'User-Agent': 'Mozilla'})).readlines()  # the huge data array returned by Finam.
    local_file = open(f'{ticker}_{start}_{end}.txt', "w")  # the file the quotes are written to.
    for line in txt:  # write the candles line by line.
        local_file.write(line.strip().decode("utf-8") + '\n')
    local_file.close()
print("Done. Check the <ticker>_<start>_<end>.txt files in the folder with this script")
|
[
0,
1,
2,
3,
4
] |
2,172 |
285ca945696b32160175f15c4e89b3938f41ebf4
|
<mask token>
def get_diabetes_data(target='progression'):
"""Get the SKLearn Diabetes regression dataset, formatted as a DataFrame
Parameters
----------
target: String, default='progression'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The diabetes dataset, with friendly column names"""
data = load_diabetes()
df = pd.DataFrame(data=data.data, columns=[_.replace(' ', '_') for _ in
data.feature_names])
df[target] = data.target
return df
<mask token>
|
<mask token>
def get_diabetes_data(target='progression'):
"""Get the SKLearn Diabetes regression dataset, formatted as a DataFrame
Parameters
----------
target: String, default='progression'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The diabetes dataset, with friendly column names"""
data = load_diabetes()
df = pd.DataFrame(data=data.data, columns=[_.replace(' ', '_') for _ in
data.feature_names])
df[target] = data.target
return df
def get_toy_classification_data(target='target', n_samples=300, n_classes=2,
shuffle=True, random_state=32, **kwargs):
"""Wrapper around `sklearn.datasets.make_classification` to produce a `pandas.DataFrame`"""
x, y = make_classification(n_samples=n_samples, n_classes=n_classes,
shuffle=shuffle, random_state=random_state, **kwargs)
train_df = pd.DataFrame(data=x, columns=range(x.shape[1]))
train_df[target] = y
return train_df
|
<mask token>
def get_breast_cancer_data(target='diagnosis'):
"""Get the Wisconsin Breast Cancer classification dataset, formatted as a DataFrame
Parameters
----------
target: String, default='diagnosis'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The breast cancer dataset, with friendly column names"""
data = load_breast_cancer()
df = pd.DataFrame(data=data.data, columns=[_.replace(' ', '_') for _ in
data.feature_names])
df[target] = data.target
return df
def get_diabetes_data(target='progression'):
"""Get the SKLearn Diabetes regression dataset, formatted as a DataFrame
Parameters
----------
target: String, default='progression'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The diabetes dataset, with friendly column names"""
data = load_diabetes()
df = pd.DataFrame(data=data.data, columns=[_.replace(' ', '_') for _ in
data.feature_names])
df[target] = data.target
return df
def get_toy_classification_data(target='target', n_samples=300, n_classes=2,
shuffle=True, random_state=32, **kwargs):
"""Wrapper around `sklearn.datasets.make_classification` to produce a `pandas.DataFrame`"""
x, y = make_classification(n_samples=n_samples, n_classes=n_classes,
shuffle=shuffle, random_state=random_state, **kwargs)
train_df = pd.DataFrame(data=x, columns=range(x.shape[1]))
train_df[target] = y
return train_df
|
<mask token>
import pandas as pd
from sklearn.datasets import load_breast_cancer, make_classification, load_diabetes
def get_breast_cancer_data(target='diagnosis'):
"""Get the Wisconsin Breast Cancer classification dataset, formatted as a DataFrame
Parameters
----------
target: String, default='diagnosis'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The breast cancer dataset, with friendly column names"""
data = load_breast_cancer()
df = pd.DataFrame(data=data.data, columns=[_.replace(' ', '_') for _ in
data.feature_names])
df[target] = data.target
return df
def get_diabetes_data(target='progression'):
"""Get the SKLearn Diabetes regression dataset, formatted as a DataFrame
Parameters
----------
target: String, default='progression'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The diabetes dataset, with friendly column names"""
data = load_diabetes()
df = pd.DataFrame(data=data.data, columns=[_.replace(' ', '_') for _ in
data.feature_names])
df[target] = data.target
return df
def get_toy_classification_data(target='target', n_samples=300, n_classes=2,
shuffle=True, random_state=32, **kwargs):
"""Wrapper around `sklearn.datasets.make_classification` to produce a `pandas.DataFrame`"""
x, y = make_classification(n_samples=n_samples, n_classes=n_classes,
shuffle=shuffle, random_state=random_state, **kwargs)
train_df = pd.DataFrame(data=x, columns=range(x.shape[1]))
train_df[target] = y
return train_df
|
"""This module defines simple utilities for making toy datasets to be used in testing/examples"""
##################################################
# Import Miscellaneous Assets
##################################################
import pandas as pd
###############################################
# Import Learning Assets
###############################################
from sklearn.datasets import load_breast_cancer, make_classification, load_diabetes
##################################################
# Dataset Utilities
##################################################
def get_breast_cancer_data(target="diagnosis"):
"""Get the Wisconsin Breast Cancer classification dataset, formatted as a DataFrame
Parameters
----------
target: String, default='diagnosis'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The breast cancer dataset, with friendly column names"""
data = load_breast_cancer()
df = pd.DataFrame(data=data.data, columns=[_.replace(" ", "_") for _ in data.feature_names])
df[target] = data.target
return df
def get_diabetes_data(target="progression"):
"""Get the SKLearn Diabetes regression dataset, formatted as a DataFrame
Parameters
----------
target: String, default='progression'
What to name the column in `df` that contains the target output values
Returns
-------
df: `pandas.DataFrame`
The diabetes dataset, with friendly column names"""
data = load_diabetes()
df = pd.DataFrame(data=data.data, columns=[_.replace(" ", "_") for _ in data.feature_names])
df[target] = data.target
return df
def get_toy_classification_data(
target="target", n_samples=300, n_classes=2, shuffle=True, random_state=32, **kwargs
):
"""Wrapper around `sklearn.datasets.make_classification` to produce a `pandas.DataFrame`"""
x, y = make_classification(
n_samples=n_samples,
n_classes=n_classes,
shuffle=shuffle,
random_state=random_state,
**kwargs
)
train_df = pd.DataFrame(data=x, columns=range(x.shape[1]))
train_df[target] = y
return train_df
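# Quick usage sketch (not part of the module): exercising the helpers above.
if __name__ == '__main__':
    df = get_breast_cancer_data()
    print(df.shape)  # (569, 31): 30 features plus the 'diagnosis' target column
    toy = get_toy_classification_data(n_samples=100, n_features=5, n_informative=3)
    print(toy['target'].value_counts())  # roughly balanced binary target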
|
[
1,
2,
3,
4,
5
] |
2,173 |
b9eeccbed63aa42afa09fe7ef782066f300255a1
|
<mask token>
|
<mask token>
sense.set_pixels(prenume)
|
<mask token>
sense = SenseHat()
b = 0, 0, 204
w = 255, 255, 255
e = 0, 0, 0
y = 255, 255, 0
r = 255, 0, 0
prenume = [e, e, e, e, e, e, e, e, e, e, e, e, e, e, e, e, e, b, e, y, y, e,
r, e, b, e, b, y, e, y, r, e, b, b, b, y, e, y, r, e, b, e, b, y, e, y,
r, e, b, e, b, y, y, e, r, e, e, e, e, e, e, e, e, e]
sense.set_pixels(prenume)
|
from sense_hat import SenseHat
import time
sense = SenseHat()
b = 0, 0, 204
w = 255, 255, 255
e = 0, 0, 0
y = 255, 255, 0
r = 255, 0, 0
prenume = [e, e, e, e, e, e, e, e, e, e, e, e, e, e, e, e, e, b, e, y, y, e,
r, e, b, e, b, y, e, y, r, e, b, b, b, y, e, y, r, e, b, e, b, y, e, y,
r, e, b, e, b, y, y, e, r, e, e, e, e, e, e, e, e, e]
sense.set_pixels(prenume)
|
from sense_hat import SenseHat
import time
sense = SenseHat()
b = (0, 0, 204) #Blue
w = (255, 255, 255) #White
e = (0, 0, 0) #Empty
y = (255, 255, 0) #Yellow
r = (255, 0, 0) #red
prenume = [
e, e, e, e, e, e, e, e,
e, e, e, e, e, e, e, e,
e, b, e, y, y, e, r, e,
b, e, b, y, e, y, r, e,
b, b, b, y, e, y, r, e,
b, e, b, y, e, y, r, e,
b, e, b, y, y, e, r, e,
e, e, e, e, e, e, e, e,
]
sense.set_pixels(prenume)
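# Note: set_pixels expects exactly 64 colour tuples, mapped to the 8x8 LED
# matrix row by row, so index = row * 8 + col. A small addressing sketch:
prenume[2 * 8 + 1] = w     # recolour the LED at row 2, col 1 to white
sense.set_pixels(prenume)  # redraw the whole matrix
# sense.set_pixel(1, 2, w) would do the same for one LED (x=col, y=row)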
|
[
0,
1,
2,
3,
4
] |
2,174 |
609071fc3af1b526fbd4555ced2376f56ae0f3c3
|
<mask token>
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
<mask token>
|
<mask token>
pygame.init()
<mask token>
screen.fill(Brack)
pygame.draw.rect(screen, White, [420, 134, 400, 500], 0)
<mask token>
screen.blit(text_fmt0, (545, 140))
pygame.display.update()
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)
pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)
pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)
button1 = text_1.render('切 换', 1, Brack)
screen.blit(button1, (611, 444))
button = text_1.render(button_text0, 1, Brack)
screen.blit(button, (506, 444))
B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,
button_text[1], color[1]], [2, 647, 290, button_text[2], color[
2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,
button_text[4], color[4]]]
if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:
response2 = urllib.request.urlopen(
'http://localhost:5000/carrier/status')
html2 = response2.read()
text2 = json.loads(html2)
a = text2['sensors']
b = text2['pos']
C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],
[3, 662, 490, a[3]], [4, 732, 490, a[4]]]
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)
pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (B0[b], 525))
if button_text0 == '手动状态:':
for t in range(5):
if button_text[t] == '结 束':
button_text[t] = '开 始'
color[t] = Green
elif button_text0 == '自动状态:':
if button_text[0] == '结 束':
response0 = urllib.request.urlopen(line[0])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[0] = '开 始'
button_text[1] = '结 束'
elif button_text[1] == '结 束':
response0 = urllib.request.urlopen(line[1])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[1] = '开 始'
button_text[2] = '结 束'
elif button_text[2] == '结 束':
response0 = urllib.request.urlopen(line[2])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[2] = '开 始'
button_text[3] = '结 束'
elif button_text[3] == '结 束':
response0 = urllib.request.urlopen(line[3])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[3] = '开 始'
button_text[4] = '结 束'
elif button_text[4] == '结 束':
response0 = urllib.request.urlopen(line[4])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[4] = '开 始'
for i in B:
Process(i[0], i[1], i[2], i[3], i[4])
for v in C:
Station(v[0], v[1], v[2], v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index == 0:
if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:
if button_text0 == '自动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '手动状态:'
color = [Green, Green, Green, Green, Green]
elif button_text0 == '手动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '自动状态:'
button_text[0] = '结 束'
color = [Gray, Gray, Gray, Gray, Gray]
for i in B:
if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[
1] <= i[2] + 25:
if button_text == ['开 始', '开 始', '开 始',
'开 始', '开 始'
] and button_text0 == '手动状态:':
color[i[0]] = Red
button_text[i[0]] = '结 束'
response1 = urllib.request.urlopen(line
[i[0]])
html1 = response1.read()
text1 = json.loads(html1)
print(text1)
for v in C:
if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[
1] <= v[2] + 28:
response3 = urllib.request.urlopen(line0
[v[0]])
html3 = response3.read()
text3 = json.loads(html3)
pygame.draw.rect(screen, White, [420,
525, 400, 50], 0)
pygame.draw.rect(screen, White, [420,
615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img,
(52, 50))
screen.blit(img, (B0[int(text3)], 525))
C = [[0, 452, 490, CGQ[v[0]][0]], [1,
522, 490, CGQ[v[0]][1]], [2, 592,
490, CGQ[v[0]][2]], [3, 662, 490,
CGQ[v[0]][3]], [4, 732, 490, CGQ[v[
0]][4]]]
for f in C:
Station(f[0], f[1], f[2], f[3])
pygame.display.update()
|
<mask token>
pygame.init()
Brack = [0, 0, 0]
White = [255, 255, 255]
Green = [0, 255, 0]
Red = [255, 0, 0]
Gray = [169, 169, 169]
button_text = ['开 始', '开 始', '开 始', '开 始', '开 始']
line = ['http://localhost:5050/mixer/000',
'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',
'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']
line0 = ['http://localhost:5000/carrier/moveto/0',
'http://localhost:5000/carrier/moveto/1',
'http://localhost:5000/carrier/moveto/2',
'http://localhost:5000/carrier/moveto/3',
'http://localhost:5000/carrier/moveto/4']
CGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],
[1, 1, 1, 1, 0]]
color = [Green, Green, Green, Green, Green]
button_text0 = '手动状态:'
button_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']
Num = ['0', '1', '2', '3', '4']
B0 = [452, 522, 592, 662, 732]
screen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)
screen.fill(Brack)
pygame.draw.rect(screen, White, [420, 134, 400, 500], 0)
text = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']
text_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)
text_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)
text_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)
text_fmt0 = text_0.render('操 作 界 面', 2, Brack)
screen.blit(text_fmt0, (545, 140))
pygame.display.update()
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)
pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)
pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)
button1 = text_1.render('切 换', 1, Brack)
screen.blit(button1, (611, 444))
button = text_1.render(button_text0, 1, Brack)
screen.blit(button, (506, 444))
B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,
button_text[1], color[1]], [2, 647, 290, button_text[2], color[
2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,
button_text[4], color[4]]]
if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:
response2 = urllib.request.urlopen(
'http://localhost:5000/carrier/status')
html2 = response2.read()
text2 = json.loads(html2)
a = text2['sensors']
b = text2['pos']
C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],
[3, 662, 490, a[3]], [4, 732, 490, a[4]]]
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)
pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (B0[b], 525))
if button_text0 == '手动状态:':
for t in range(5):
if button_text[t] == '结 束':
button_text[t] = '开 始'
color[t] = Green
elif button_text0 == '自动状态:':
if button_text[0] == '结 束':
response0 = urllib.request.urlopen(line[0])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[0] = '开 始'
button_text[1] = '结 束'
elif button_text[1] == '结 束':
response0 = urllib.request.urlopen(line[1])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[1] = '开 始'
button_text[2] = '结 束'
elif button_text[2] == '结 束':
response0 = urllib.request.urlopen(line[2])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[2] = '开 始'
button_text[3] = '结 束'
elif button_text[3] == '结 束':
response0 = urllib.request.urlopen(line[3])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[3] = '开 始'
button_text[4] = '结 束'
elif button_text[4] == '结 束':
response0 = urllib.request.urlopen(line[4])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[4] = '开 始'
for i in B:
Process(i[0], i[1], i[2], i[3], i[4])
for v in C:
Station(v[0], v[1], v[2], v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index == 0:
if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:
if button_text0 == '自动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '手动状态:'
color = [Green, Green, Green, Green, Green]
elif button_text0 == '手动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '自动状态:'
button_text[0] = '结 束'
color = [Gray, Gray, Gray, Gray, Gray]
for i in B:
if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[
1] <= i[2] + 25:
if button_text == ['开 始', '开 始', '开 始',
'开 始', '开 始'
] and button_text0 == '手动状态:':
color[i[0]] = Red
button_text[i[0]] = '结 束'
response1 = urllib.request.urlopen(line
[i[0]])
html1 = response1.read()
text1 = json.loads(html1)
print(text1)
for v in C:
if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[
1] <= v[2] + 28:
response3 = urllib.request.urlopen(line0
[v[0]])
html3 = response3.read()
text3 = json.loads(html3)
pygame.draw.rect(screen, White, [420,
525, 400, 50], 0)
pygame.draw.rect(screen, White, [420,
615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img,
(52, 50))
screen.blit(img, (B0[int(text3)], 525))
C = [[0, 452, 490, CGQ[v[0]][0]], [1,
522, 490, CGQ[v[0]][1]], [2, 592,
490, CGQ[v[0]][2]], [3, 662, 490,
CGQ[v[0]][3]], [4, 732, 490, CGQ[v[
0]][4]]]
for f in C:
Station(f[0], f[1], f[2], f[3])
pygame.display.update()
|
import time
import json
import pygame
from pygame.locals import *
import urllib.request
from pygame.color import THECOLORS
pygame.init()
Brack = [0, 0, 0]
White = [255, 255, 255]
Green = [0, 255, 0]
Red = [255, 0, 0]
Gray = [169, 169, 169]
button_text = ['开 始', '开 始', '开 始', '开 始', '开 始']
line = ['http://localhost:5050/mixer/000',
'http://localhost:5050/mixer/100', 'http://localhost:5050/mixer/200',
'http://localhost:5050/mixer/300', 'http://localhost:5050/mixer/400']
line0 = ['http://localhost:5000/carrier/moveto/0',
'http://localhost:5000/carrier/moveto/1',
'http://localhost:5000/carrier/moveto/2',
'http://localhost:5000/carrier/moveto/3',
'http://localhost:5000/carrier/moveto/4']
CGQ = [[0, 1, 1, 1, 1], [1, 0, 1, 1, 1], [1, 1, 0, 1, 1], [1, 1, 1, 0, 1],
[1, 1, 1, 1, 0]]
color = [Green, Green, Green, Green, Green]
button_text0 = '手动状态:'
button_text1 = ['工位0', '工位1', '工位2', '工位3', '工位4']
Num = ['0', '1', '2', '3', '4']
B0 = [452, 522, 592, 662, 732]
screen = pygame.display.set_mode((1240, 768), FULLSCREEN, 32)
screen.fill(Brack)
pygame.draw.rect(screen, White, [420, 134, 400, 500], 0)
text = ['工 序 甲:', '工 序 乙:', '工 序 丙:', '工 序 丁:', '工 序 戊:']
text_0 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 22)
text_1 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 18)
text_2 = pygame.font.Font('/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc', 15)
text_fmt0 = text_0.render('操 作 界 面', 2, Brack)
screen.blit(text_fmt0, (545, 140))
pygame.display.update()
def Process(num, x, y, button_text, color):
text_fmt1 = text_1.render(text[num], 1, Brack)
screen.blit(text_fmt1, (x - 127, y))
pygame.draw.rect(screen, Brack, [x, y, 60, 25], 2)
pygame.draw.rect(screen, color, [x + 2, y + 2, 57, 22], 0)
button = text_2.render(button_text, 1, Brack)
screen.blit(button, (x + 13, y + 3))
pygame.display.update()
def Station(num, x, y, a):
pygame.draw.rect(screen, Brack, [x, y, 55, 28], 2)
pygame.draw.rect(screen, Green, [x + 2, y + 2, 52, 25], 0)
button = text_2.render(button_text1[num], 1, Brack)
screen.blit(button, (x + 9, y + 4))
img = pygame.image.load('cgq.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (x, y + 80))
button = text_1.render(Num[a], 1, Brack)
screen.blit(button, (x + 20, 610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen, White, [506, 440, 85, 28], 0)
pygame.draw.rect(screen, Brack, [597, 440, 65, 28], 2)
pygame.draw.rect(screen, Green, [599, 442, 62, 25], 0)
button1 = text_1.render('切 换', 1, Brack)
screen.blit(button1, (611, 444))
button = text_1.render(button_text0, 1, Brack)
screen.blit(button, (506, 444))
B = [[0, 647, 190, button_text[0], color[0]], [1, 647, 240,
button_text[1], color[1]], [2, 647, 290, button_text[2], color[
2]], [3, 647, 340, button_text[3], color[3]], [4, 647, 390,
button_text[4], color[4]]]
if button_text == ['开 始', '开 始', '开 始', '开 始', '开 始']:
response2 = urllib.request.urlopen(
'http://localhost:5000/carrier/status')
html2 = response2.read()
text2 = json.loads(html2)
a = text2['sensors']
b = text2['pos']
C = [[0, 452, 490, a[0]], [1, 522, 490, a[1]], [2, 592, 490, a[2]],
[3, 662, 490, a[3]], [4, 732, 490, a[4]]]
pygame.draw.rect(screen, White, [420, 525, 400, 50], 0)
pygame.draw.rect(screen, White, [420, 615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img, (52, 50))
screen.blit(img, (B0[b], 525))
if button_text0 == '手动状态:':
for t in range(5):
if button_text[t] == '结 束':
button_text[t] = '开 始'
color[t] = Green
elif button_text0 == '自动状态:':
if button_text[0] == '结 束':
response0 = urllib.request.urlopen(line[0])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[0] = '开 始'
button_text[1] = '结 束'
elif button_text[1] == '结 束':
response0 = urllib.request.urlopen(line[1])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[1] = '开 始'
button_text[2] = '结 束'
elif button_text[2] == '结 束':
response0 = urllib.request.urlopen(line[2])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[2] = '开 始'
button_text[3] = '结 束'
elif button_text[3] == '结 束':
response0 = urllib.request.urlopen(line[3])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[3] = '开 始'
button_text[4] = '结 束'
elif button_text[4] == '结 束':
response0 = urllib.request.urlopen(line[4])
html0 = response0.read()
text0 = json.loads(html0)
print(text0)
button_text[4] = '开 始'
for i in B:
Process(i[0], i[1], i[2], i[3], i[4])
for v in C:
Station(v[0], v[1], v[2], v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index == 0:
if 597 <= pos[0] <= 662 and 440 <= pos[1] <= 468:
if button_text0 == '自动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '手动状态:'
color = [Green, Green, Green, Green, Green]
elif button_text0 == '手动状态:' and button_text == [
'开 始', '开 始', '开 始', '开 始', '开 始']:
button_text0 = '自动状态:'
button_text[0] = '结 束'
color = [Gray, Gray, Gray, Gray, Gray]
for i in B:
if i[1] <= pos[0] <= i[1] + 60 and i[2] <= pos[
1] <= i[2] + 25:
if button_text == ['开 始', '开 始', '开 始',
'开 始', '开 始'
] and button_text0 == '手动状态:':
color[i[0]] = Red
button_text[i[0]] = '结 束'
response1 = urllib.request.urlopen(line
[i[0]])
html1 = response1.read()
text1 = json.loads(html1)
print(text1)
for v in C:
if v[1] <= pos[0] <= v[1] + 60 and v[2] <= pos[
1] <= v[2] + 28:
response3 = urllib.request.urlopen(line0
[v[0]])
html3 = response3.read()
text3 = json.loads(html3)
pygame.draw.rect(screen, White, [420,
525, 400, 50], 0)
pygame.draw.rect(screen, White, [420,
615, 400, 30], 0)
img = pygame.image.load('car.jpg')
img = pygame.transform.smoothscale(img,
(52, 50))
screen.blit(img, (B0[int(text3)], 525))
C = [[0, 452, 490, CGQ[v[0]][0]], [1,
522, 490, CGQ[v[0]][1]], [2, 592,
490, CGQ[v[0]][2]], [3, 662, 490,
CGQ[v[0]][3]], [4, 732, 490, CGQ[v[
0]][4]]]
for f in C:
Station(f[0], f[1], f[2], f[3])
pygame.display.update()
|
import time
import json
import pygame
from pygame.locals import *
import urllib.request
from pygame.color import THECOLORS
pygame.init()
Brack=[0,0,0]
White=[255,255,255]
Green=[0,255,0]
Red=[255,0,0]
Gray=[169,169,169]
button_text=["开 始","开 始","开 始","开 始","开 始"]
line=['http://localhost:5050/mixer/000','http://localhost:5050/mixer/100','http://localhost:5050/mixer/200','http://localhost:5050/mixer/300','http://localhost:5050/mixer/400']
line0=['http://localhost:5000/carrier/moveto/0','http://localhost:5000/carrier/moveto/1','http://localhost:5000/carrier/moveto/2','http://localhost:5000/carrier/moveto/3','http://localhost:5000/carrier/moveto/4']
CGQ=[[0,1,1,1,1],[1,0,1,1,1],[1,1,0,1,1],[1,1,1,0,1],[1,1,1,1,0]]
color=[Green,Green,Green,Green,Green]
button_text0="手动状态:"
button_text1=["工位0","工位1","工位2","工位3","工位4"]
Num=['0','1','2','3','4']
B0=[452,522,592,662,732]
screen = pygame.display.set_mode((1240,768),FULLSCREEN,32)
screen.fill(Brack)
pygame.draw.rect(screen,White,[420,134,400,500],0)
text=["工 序 甲:","工 序 乙:","工 序 丙:","工 序 丁:","工 序 戊:"]
text_0=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",22)
text_1=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",18)
text_2=pygame.font.Font("/usr/share/fonts/truetype/wqy/wqy-zenhei.ttc",15)
text_fmt0=text_0.render("操 作 界 面",2,Brack)
screen.blit(text_fmt0,(545,140))
pygame.display.update()
def Process(num,x,y,button_text,color):
text_fmt1=text_1.render(text[num],1,Brack)
screen.blit(text_fmt1,(x-127,y))
pygame.draw.rect(screen,Brack,[x,y,60,25],2)
pygame.draw.rect(screen,color,[x+2,y+2,57,22],0)
button=text_2.render(button_text,1,Brack)
screen.blit(button,(x+13,y+3))
pygame.display.update()
def Station(num,x,y,a):
pygame.draw.rect(screen,Brack,[x,y,55,28],2)
pygame.draw.rect(screen,Green,[x+2,y+2,52,25],0)
button=text_2.render(button_text1[num],1,Brack)
screen.blit(button,(x+9,y+4))
img=pygame.image.load('cgq.jpg')
img=pygame.transform.smoothscale(img,(52,50))
screen.blit(img,(x,y+80))
button=text_1.render(Num[a],1,Brack)
screen.blit(button,(x+20,610))
pygame.display.update()
if __name__ == '__main__':
while True:
time.sleep(1.5)
pygame.draw.rect(screen,White,[506,440,85,28],0)
pygame.draw.rect(screen,Brack,[597,440,65,28],2)
pygame.draw.rect(screen,Green,[599,442,62,25],0)
button1=text_1.render("切 换",1,Brack)
screen.blit(button1,(611,444))
button=text_1.render(button_text0,1,Brack)
screen.blit(button,(506,444))
B=[[0,647,190,button_text[0],color[0]],[1,647,240,button_text[1],color[1]],[2,647,290,button_text[2],color[2]],[3,647,340,button_text[3],color[3]],[4,647,390,button_text[4],color[4]]]
if button_text==["开 始","开 始","开 始","开 始","开 始"]:
response2=urllib.request.urlopen('http://localhost:5000/carrier/status')
html2=response2.read()
text2=json.loads(html2)
a=text2['sensors']
b=text2['pos']
C=[[0,452,490,a[0]],[1,522,490,a[1]],[2,592,490,a[2]],[3,662,490,a[3]],[4,732,490,a[4]]]
pygame.draw.rect(screen,White,[420,525,400,50],0)
pygame.draw.rect(screen,White,[420,615,400,30],0)
img=pygame.image.load('car.jpg')
img=pygame.transform.smoothscale(img,(52,50))
screen.blit(img,(B0[b],525))
if button_text0=="手动状态:":
for t in range(5):
if button_text[t]=="结 束":
button_text[t]="开 始"
color[t]=Green
elif button_text0=="自动状态:":
if button_text[0]=="结 束":
response0=urllib.request.urlopen(line[0])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[0]="开 始"
button_text[1]="结 束"
elif button_text[1]=="结 束":
response0=urllib.request.urlopen(line[1])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[1]="开 始"
button_text[2]="结 束"
elif button_text[2]=="结 束":
response0=urllib.request.urlopen(line[2])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[2]="开 始"
button_text[3]="结 束"
elif button_text[3]=="结 束":
response0=urllib.request.urlopen(line[3])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[3]="开 始"
button_text[4]="结 束"
elif button_text[4]=="结 束":
response0=urllib.request.urlopen(line[4])
html0=response0.read()
text0=json.loads(html0)
print(text0)
button_text[4]="开 始"
for i in B:
Process(i[0],i[1],i[2],i[3],i[4])
for v in C:
Station(v[0],v[1],v[2],v[3])
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_ESCAPE:
exit()
elif event.type == QUIT:
exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
pressed_array = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
for index in range(len(pressed_array)):
if pressed_array[index]:
if index==0:
if 597<=pos[0]<=662 and 440<=pos[1]<=468:
if button_text0=="自动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
button_text0="手动状态:"
color=[Green,Green,Green,Green,Green]
elif button_text0=="手动状态:" and button_text==["开 始","开 始","开 始","开 始","开 始"]:
button_text0="自动状态:"
button_text[0]="结 束"
color=[Gray,Gray,Gray,Gray,Gray]
for i in B:
if i[1]<=pos[0]<=i[1]+60 and i[2]<=pos[1]<=i[2]+25:
if button_text==["开 始","开 始","开 始","开 始","开 始"] and button_text0=="手动状态:":
color[i[0]]=Red
button_text[i[0]]="结 束"
response1=urllib.request.urlopen(line[i[0]])
html1=response1.read()
text1=json.loads(html1)
print(text1)
for v in C:
if v[1]<=pos[0]<=v[1]+60 and v[2]<=pos[1]<=v[2]+28:
response3=urllib.request.urlopen(line0[v[0]])
html3=response3.read()
text3=json.loads(html3)
pygame.draw.rect(screen,White,[420,525,400,50],0)
pygame.draw.rect(screen,White,[420,615,400,30],0)
img=pygame.image.load('car.jpg')
img=pygame.transform.smoothscale(img,(52,50))
screen.blit(img,(B0[int(text3)],525))
C=[[0,452,490,CGQ[v[0]][0]],[1,522,490,CGQ[v[0]][1]],[2,592,490,CGQ[v[0]][2]],[3,662,490,CGQ[v[0]][3]],[4,732,490,CGQ[v[0]][4]]]
for f in C:
Station(f[0],f[1],f[2],f[3])
pygame.display.update()
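# Sketch only (not wired into the app above): the five near-identical
# auto-mode branches could collapse into one loop with the same behaviour,
# reusing the existing line/button_text globals.
def advance_auto_mode(button_text, line):
    for idx in range(5):
        if button_text[idx] == '结 束':  # this process is the active one
            response = urllib.request.urlopen(line[idx])
            print(json.loads(response.read()))
            button_text[idx] = '开 始'  # mark it finished...
            if idx < 4:
                button_text[idx + 1] = '结 束'  # ...and queue the next process
            break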
|
[
2,
3,
4,
5,
6
] |
2,175 |
10723f703f40b5db2b7c9532cda520b2ae078546
|
<mask token>
def lane_emden_int(dz=2.0 ** -14, n=3.0, w=0.0):
"""
Interface to FORTRAN90 Lane-Emden Integrator.
Call:
ndata, data = laneemden.lane_emden_int(dz, n, w)
INPUT:
dz:
        step in z, may use 2**(-14)
n:
polytropic index (use 3.)
w:
        rotation parameter (use 0. for non-rotating)
w = 2 Omega^2 / (4 pi G rho_c)
OUTPUT:
ndata:
number of last point (starts with 0)
data:
output data in form [0:ndata,0:1]
index 0:
equidistant grid with step size dz starting at 0
index 1:
0: theta(z)
1: d theta(z) / dz
"""
_solver.lane(dz, n, w)
out = _solver.laneout
n = int(out.ndata)
t = out.theta
return n, t[0:n + 1, :]
def lane_emden_step(x, y, dx, n, w):
"""
This allows a single call to the rk4 subroutine.
It turns out to be *way* less efficient.
Do not use.
"""
_solver.rk4(x, y[0], y[1], dx, n, w)
out = _solver.rk4out
return np.array([out.z0, out.z1])
<mask token>
|
<mask token>
def test():
"""
A simple test.
"""
n = 3.0
dz = 2.0 ** -14
_solver.lane(dz, n)
out = _solver.laneout
n = out.ndata
t = out.theta
return t, n
def lane_emden_int(dz=2.0 ** -14, n=3.0, w=0.0):
"""
Interface to FORTRAN90 Lane-Emden Integrator.
Call:
ndata, data = laneemden.lane_emden_int(dz, n, w)
INPUT:
dz:
        step in z, may use 2**(-14)
n:
polytropic index (use 3.)
w:
        rotation parameter (use 0. for non-rotating)
w = 2 Omega^2 / (4 pi G rho_c)
OUTPUT:
ndata:
number of last point (starts with 0)
data:
output data in form [0:ndata,0:1]
index 0:
equidistant grid with step size dz starting at 0
index 1:
0: theta(z)
1: d theta(z) / dz
"""
_solver.lane(dz, n, w)
out = _solver.laneout
n = int(out.ndata)
t = out.theta
return n, t[0:n + 1, :]
def lane_emden_step(x, y, dx, n, w):
"""
This allows a single call to the rk4 subroutine.
It turns out to be *way* less efficient.
Do not use.
"""
_solver.rk4(x, y[0], y[1], dx, n, w)
out = _solver.rk4out
return np.array([out.z0, out.z1])
<mask token>
|
<mask token>
def test():
"""
A simple test.
"""
n = 3.0
dz = 2.0 ** -14
_solver.lane(dz, n)
out = _solver.laneout
n = out.ndata
t = out.theta
return t, n
def lane_emden_int(dz=2.0 ** -14, n=3.0, w=0.0):
"""
Interface to FORTRAN90 Lane-Emden Integrator.
Call:
ndata, data = laneemden.lane_emden_int(dz, n, w)
INPUT:
dz:
        step in z, may use 2**(-14)
n:
polytropic index (use 3.)
w:
        rotation parameter (use 0. for non-rotating)
w = 2 Omega^2 / (4 pi G rho_c)
OUTPUT:
ndata:
number of last point (starts with 0)
data:
output data in form [0:ndata,0:1]
index 0:
equidistant grid with step size dz starting at 0
index 1:
0: theta(z)
1: d theta(z) / dz
"""
_solver.lane(dz, n, w)
out = _solver.laneout
n = int(out.ndata)
t = out.theta
return n, t[0:n + 1, :]
def lane_emden_step(x, y, dx, n, w):
"""
This allows a single call to the rk4 subroutine.
It turns out to be *way* less efficient.
Do not use.
"""
_solver.rk4(x, y[0], y[1], dx, n, w)
out = _solver.rk4out
return np.array([out.z0, out.z1])
if __name__ == '__main__':
t, n = test()
print(t, n)
|
<mask token>
import numpy as np
from . import _solver
def test():
"""
A simple test.
"""
n = 3.0
dz = 2.0 ** -14
_solver.lane(dz, n)
out = _solver.laneout
n = out.ndata
t = out.theta
return t, n
def lane_emden_int(dz=2.0 ** -14, n=3.0, w=0.0):
"""
Interface to FORTRAN90 Lane-Emden Integrator.
Call:
ndata, data = laneemden.lane_emden_int(dz, n, w)
INPUT:
dz:
        step in z, may use 2**(-14)
n:
polytropic index (use 3.)
w:
        rotation parameter (use 0. for non-rotating)
w = 2 Omega^2 / (4 pi G rho_c)
OUTPUT:
ndata:
number of last point (starts with 0)
data:
output data in form [0:ndata,0:1]
index 0:
equidistant grid with step size dz starting at 0
index 1:
0: theta(z)
1: d theta(z) / dz
"""
_solver.lane(dz, n, w)
out = _solver.laneout
n = int(out.ndata)
t = out.theta
return n, t[0:n + 1, :]
def lane_emden_step(x, y, dx, n, w):
"""
This allows a single call to the rk4 subroutine.
It turns out to be *way* less efficient.
Do not use.
"""
_solver.rk4(x, y[0], y[1], dx, n, w)
out = _solver.rk4out
return np.array([out.z0, out.z1])
if __name__ == '__main__':
t, n = test()
print(t, n)
|
#!/usr/bin/env python3
"""
Lane Emden Python interface.
Main routine:
lane_emden_int(dz, n)
"""
import numpy as np
from . import _solver
def test():
"""
A simple test.
"""
n = 3.
dz = 2.**(-14)
_solver.lane(dz,n)
out = _solver.laneout
n = out.ndata
t = out.theta
return t,n
def lane_emden_int(dz = 2.**(-14), n = 3., w = 0.):
"""
Interface to FORTRAN90 Lane-Emden Integrator.
Call:
ndata, data = laneemden.lane_emden_int(dz, n, w)
INPUT:
dz:
        step in z, may use 2**(-14)
n:
polytropic index (use 3.)
w:
        rotation parameter (use 0. for non-rotating)
w = 2 Omega^2 / (4 pi G rho_c)
OUTPUT:
ndata:
number of last point (starts with 0)
data:
output data in form [0:ndata,0:1]
index 0:
equidistant grid with step size dz starting at 0
index 1:
0: theta(z)
1: d theta(z) / dz
"""
_solver.lane(dz, n, w)
out = _solver.laneout
n = int(out.ndata)
t = out.theta
return n,t[0:n+1,:]
def lane_emden_step(x,y,dx,n,w):
"""
This allows a single call to the rk4 subroutine.
It turns out to be *way* less efficient.
Do not use.
"""
_solver.rk4(x,y[0],y[1],dx,n,w)
out = _solver.rk4out
return np.array([out.z0,out.z1])
if __name__ == '__main__':
t,n = test()
print(t, n)
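    # Usage sketch (assumes the compiled _solver extension is importable):
    # for n = 3 the integration should stop near the first zero of theta,
    # the classic xi_1 ~ 6.897.
    ndata, data = lane_emden_int(dz=2.0 ** -14, n=3.0, w=0.0)
    print('surface at z ~', ndata * 2.0 ** -14)  # expect roughly 6.897
    print('d(theta)/dz at the surface:', data[-1, 1])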
|
[
2,
3,
4,
5,
6
] |
2,176 |
e4bfa0a55fe0dbb547bc5f65554ef96be654ec7a
|
<mask token>
class SubscriptionHandler(object):
<mask token>
<mask token>
def handle_subscribe(self, request):
if not request.xpath('//m:StreamingSubscriptionRequest', namespaces
=NAMESPACES):
return
emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)
assert len(emails) == 1
assert emails[0].text == self.resource.principal_email
subscription_id = get_random_string(10)
self.subscription_to_resource[subscription_id] = self.resource
return M.SubscribeResponse(M.ResponseMessages(M.
SubscribeResponseMessage(M.ResponseCode('NoError'), M.
SubscriptionId(subscription_id), ResponseClass='Success')))
<mask token>
def handle_get_events(self, request):
if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):
return
sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0
].text
return M.GetStreamingEventsResponse(M.ResponseMessages(M.
GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.
Notifications(M.Notification(T.SubscriptionId(sub_id), self.
_generate_event('NewMailEvent'))), ResponseClass='Success')))
def handle_unsubscribe(self, request):
if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):
return
subscription_id = request.xpath('//m:SubscriptionId', namespaces=
NAMESPACES)[0].text
self.subscription_to_resource.pop(subscription_id)
return M.UnsubscribeResponse(M.ResponseMessages(M.
UnsubscribeResponseMessage(M.ResponseCode('NoError'),
ResponseClass='Success')))
<mask token>
|
<mask token>
class SubscriptionHandler(object):
"""
SoapSeller handler for the streaming requests.
"""
def __init__(self, resource):
self.resource = resource
self.subscription_to_resource = {}
def handle_subscribe(self, request):
if not request.xpath('//m:StreamingSubscriptionRequest', namespaces
=NAMESPACES):
return
emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)
assert len(emails) == 1
assert emails[0].text == self.resource.principal_email
subscription_id = get_random_string(10)
self.subscription_to_resource[subscription_id] = self.resource
return M.SubscribeResponse(M.ResponseMessages(M.
SubscribeResponseMessage(M.ResponseCode('NoError'), M.
SubscriptionId(subscription_id), ResponseClass='Success')))
def _generate_event(self, type):
return getattr(T, type)(T.TimeStamp(now().isoformat()), T.ItemId(Id
=get_random_string(), ChangeKey=get_random_string()), T.
ParentFolderId(Id=get_random_string(), ChangeKey=
get_random_string()))
def handle_get_events(self, request):
if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):
return
sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0
].text
return M.GetStreamingEventsResponse(M.ResponseMessages(M.
GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.
Notifications(M.Notification(T.SubscriptionId(sub_id), self.
_generate_event('NewMailEvent'))), ResponseClass='Success')))
def handle_unsubscribe(self, request):
if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):
return
subscription_id = request.xpath('//m:SubscriptionId', namespaces=
NAMESPACES)[0].text
self.subscription_to_resource.pop(subscription_id)
return M.UnsubscribeResponse(M.ResponseMessages(M.
UnsubscribeResponseMessage(M.ResponseCode('NoError'),
ResponseClass='Success')))
<mask token>
|
<mask token>
class SubscriptionHandler(object):
"""
SoapSeller handler for the streaming requests.
"""
def __init__(self, resource):
self.resource = resource
self.subscription_to_resource = {}
def handle_subscribe(self, request):
if not request.xpath('//m:StreamingSubscriptionRequest', namespaces
=NAMESPACES):
return
emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)
assert len(emails) == 1
assert emails[0].text == self.resource.principal_email
subscription_id = get_random_string(10)
self.subscription_to_resource[subscription_id] = self.resource
return M.SubscribeResponse(M.ResponseMessages(M.
SubscribeResponseMessage(M.ResponseCode('NoError'), M.
SubscriptionId(subscription_id), ResponseClass='Success')))
def _generate_event(self, type):
return getattr(T, type)(T.TimeStamp(now().isoformat()), T.ItemId(Id
=get_random_string(), ChangeKey=get_random_string()), T.
ParentFolderId(Id=get_random_string(), ChangeKey=
get_random_string()))
def handle_get_events(self, request):
if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):
return
sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0
].text
return M.GetStreamingEventsResponse(M.ResponseMessages(M.
GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.
Notifications(M.Notification(T.SubscriptionId(sub_id), self.
_generate_event('NewMailEvent'))), ResponseClass='Success')))
def handle_unsubscribe(self, request):
if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):
return
subscription_id = request.xpath('//m:SubscriptionId', namespaces=
NAMESPACES)[0].text
self.subscription_to_resource.pop(subscription_id)
return M.UnsubscribeResponse(M.ResponseMessages(M.
UnsubscribeResponseMessage(M.ResponseCode('NoError'),
ResponseClass='Success')))
@pytest.mark.django_db
def test_listener(settings, space_resource, exchange, monkeypatch):
    email = '%s@example.com' % get_random_string()  # example.com stands in for the redacted domain
ex_resource = ExchangeResource.objects.create(resource=space_resource,
principal_email=email, exchange=exchange, sync_to_respa=True)
assert ex_resource.reservations.count() == 0
delegate = SubscriptionHandler(ex_resource)
SoapSeller.wire(settings, delegate)
notification_listener = listener.NotificationListener()
synced_resources = []
def sync_resource(resource):
synced_resources.append(resource)
notification_listener.stop()
monkeypatch.setattr(listener, 'sync_from_exchange', sync_resource)
notification_listener.start()
assert ex_resource in synced_resources
|
import pytest
from django.utils.crypto import get_random_string
from django.utils.timezone import now
from respa_exchange import listener
from respa_exchange.ews.xml import M, NAMESPACES, T
from respa_exchange.models import ExchangeResource
from respa_exchange.tests.session import SoapSeller
class SubscriptionHandler(object):
"""
SoapSeller handler for the streaming requests.
"""
def __init__(self, resource):
self.resource = resource
self.subscription_to_resource = {}
def handle_subscribe(self, request):
if not request.xpath('//m:StreamingSubscriptionRequest', namespaces
=NAMESPACES):
return
emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)
assert len(emails) == 1
assert emails[0].text == self.resource.principal_email
subscription_id = get_random_string(10)
self.subscription_to_resource[subscription_id] = self.resource
return M.SubscribeResponse(M.ResponseMessages(M.
SubscribeResponseMessage(M.ResponseCode('NoError'), M.
SubscriptionId(subscription_id), ResponseClass='Success')))
def _generate_event(self, type):
return getattr(T, type)(T.TimeStamp(now().isoformat()), T.ItemId(Id
=get_random_string(), ChangeKey=get_random_string()), T.
ParentFolderId(Id=get_random_string(), ChangeKey=
get_random_string()))
def handle_get_events(self, request):
if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES):
return
sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0
].text
return M.GetStreamingEventsResponse(M.ResponseMessages(M.
GetStreamingEventsResponseMessage(M.ResponseCode('NoError'), M.
Notifications(M.Notification(T.SubscriptionId(sub_id), self.
_generate_event('NewMailEvent'))), ResponseClass='Success')))
def handle_unsubscribe(self, request):
if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES):
return
subscription_id = request.xpath('//m:SubscriptionId', namespaces=
NAMESPACES)[0].text
self.subscription_to_resource.pop(subscription_id)
return M.UnsubscribeResponse(M.ResponseMessages(M.
UnsubscribeResponseMessage(M.ResponseCode('NoError'),
ResponseClass='Success')))
@pytest.mark.django_db
def test_listener(settings, space_resource, exchange, monkeypatch):
    email = '%s@example.com' % get_random_string()  # example.com stands in for the redacted domain
ex_resource = ExchangeResource.objects.create(resource=space_resource,
principal_email=email, exchange=exchange, sync_to_respa=True)
assert ex_resource.reservations.count() == 0
delegate = SubscriptionHandler(ex_resource)
SoapSeller.wire(settings, delegate)
notification_listener = listener.NotificationListener()
synced_resources = []
def sync_resource(resource):
synced_resources.append(resource)
notification_listener.stop()
monkeypatch.setattr(listener, 'sync_from_exchange', sync_resource)
notification_listener.start()
assert ex_resource in synced_resources
|
import pytest
from django.utils.crypto import get_random_string
from django.utils.timezone import now
from respa_exchange import listener
from respa_exchange.ews.xml import M, NAMESPACES, T
from respa_exchange.models import ExchangeResource
from respa_exchange.tests.session import SoapSeller
class SubscriptionHandler(object):
"""
SoapSeller handler for the streaming requests.
"""
def __init__(self, resource):
self.resource = resource
self.subscription_to_resource = {}
def handle_subscribe(self, request):
if not request.xpath('//m:StreamingSubscriptionRequest', namespaces=NAMESPACES): # pragma: no cover
return
emails = request.xpath('//t:EmailAddress', namespaces=NAMESPACES)
assert len(emails) == 1
assert emails[0].text == self.resource.principal_email
subscription_id = get_random_string(10)
self.subscription_to_resource[subscription_id] = self.resource
return M.SubscribeResponse(
M.ResponseMessages(
M.SubscribeResponseMessage(
M.ResponseCode('NoError'),
M.SubscriptionId(subscription_id),
ResponseClass='Success',
),
),
)
def _generate_event(self, type):
return getattr(T, type)(
T.TimeStamp(now().isoformat()),
T.ItemId(
Id=get_random_string(),
ChangeKey=get_random_string(),
),
T.ParentFolderId(
Id=get_random_string(),
ChangeKey=get_random_string(),
),
)
def handle_get_events(self, request):
if not request.xpath('//m:GetStreamingEvents', namespaces=NAMESPACES): # pragma: no cover
return
sub_id = request.xpath('//t:SubscriptionId', namespaces=NAMESPACES)[0].text
# This would be a long-polling operation,
# but ain't nobody got time for that
return M.GetStreamingEventsResponse(
M.ResponseMessages(
M.GetStreamingEventsResponseMessage(
M.ResponseCode('NoError'),
M.Notifications(
M.Notification(
T.SubscriptionId(sub_id),
self._generate_event('NewMailEvent'),
),
),
ResponseClass='Success',
),
),
)
def handle_unsubscribe(self, request):
if not request.xpath('//m:Unsubscribe', namespaces=NAMESPACES): # pragma: no cover
return
subscription_id = request.xpath('//m:SubscriptionId', namespaces=NAMESPACES)[0].text
self.subscription_to_resource.pop(subscription_id)
return M.UnsubscribeResponse(
M.ResponseMessages(
M.UnsubscribeResponseMessage(
M.ResponseCode('NoError'),
ResponseClass='Success',
),
),
)
@pytest.mark.django_db
def test_listener(settings, space_resource, exchange, monkeypatch):
    email = '%s@example.com' % get_random_string()  # example.com stands in for the redacted domain
ex_resource = ExchangeResource.objects.create(
resource=space_resource,
principal_email=email,
exchange=exchange,
sync_to_respa=True,
)
assert ex_resource.reservations.count() == 0
delegate = SubscriptionHandler(ex_resource)
SoapSeller.wire(settings, delegate)
notification_listener = listener.NotificationListener()
synced_resources = [] # Keep track of the resources we get sync-request events for
def sync_resource(resource): # Our pretend sync handler
synced_resources.append(resource)
# Ask the listener to stop after we get a resource,
# so this test actually ends someday:
notification_listener.stop()
monkeypatch.setattr(listener, 'sync_from_exchange', sync_resource)
notification_listener.start()
# ... so when `sync_resource` is called, this'll eventually happen:
assert ex_resource in synced_resources
|
[
4,
7,
8,
9,
10
] |
2,177 |
91cf6d08be2ad86c08de4dd48b2f35dedc55b4bb
|
<mask token>
class FASTGIString(GIPlatformInterface):
<mask token>
def __init__(self, number, sender):
"""Initialise GI string.
TODO: Need to implement the enable_relay and control which strings are
dimmable.
"""
self.log = logging.getLogger('FASTGIString.0x' + str(number))
self.number = number
self.send = sender
<mask token>
def on(self, brightness=255):
"""Turn on GI string."""
if brightness >= 255:
brightness = 255
self.log.debug('Turning On GI String to brightness %s', brightness)
self.send('GI:{},{}'.format(self.number, Util.int_to_hex_string(
brightness)))
|
<mask token>
class FASTGIString(GIPlatformInterface):
<mask token>
def __init__(self, number, sender):
"""Initialise GI string.
TODO: Need to implement the enable_relay and control which strings are
dimmable.
"""
self.log = logging.getLogger('FASTGIString.0x' + str(number))
self.number = number
self.send = sender
def off(self):
"""Turn off GI string."""
self.log.debug('Turning Off GI String')
self.send('GI:' + self.number + ',00')
def on(self, brightness=255):
"""Turn on GI string."""
if brightness >= 255:
brightness = 255
self.log.debug('Turning On GI String to brightness %s', brightness)
self.send('GI:{},{}'.format(self.number, Util.int_to_hex_string(
brightness)))
|
<mask token>
class FASTGIString(GIPlatformInterface):
"""A FAST GI string in a WPC machine."""
def __init__(self, number, sender):
"""Initialise GI string.
TODO: Need to implement the enable_relay and control which strings are
dimmable.
"""
self.log = logging.getLogger('FASTGIString.0x' + str(number))
self.number = number
self.send = sender
def off(self):
"""Turn off GI string."""
self.log.debug('Turning Off GI String')
self.send('GI:' + self.number + ',00')
def on(self, brightness=255):
"""Turn on GI string."""
if brightness >= 255:
brightness = 255
self.log.debug('Turning On GI String to brightness %s', brightness)
self.send('GI:{},{}'.format(self.number, Util.int_to_hex_string(
brightness)))
|
<mask token>
import logging
from mpf.core.utility_functions import Util
from mpf.platforms.interfaces.gi_platform_interface import GIPlatformInterface
class FASTGIString(GIPlatformInterface):
"""A FAST GI string in a WPC machine."""
def __init__(self, number, sender):
"""Initialise GI string.
TODO: Need to implement the enable_relay and control which strings are
dimmable.
"""
self.log = logging.getLogger('FASTGIString.0x' + str(number))
self.number = number
self.send = sender
def off(self):
"""Turn off GI string."""
self.log.debug('Turning Off GI String')
self.send('GI:' + self.number + ',00')
def on(self, brightness=255):
"""Turn on GI string."""
if brightness >= 255:
brightness = 255
self.log.debug('Turning On GI String to brightness %s', brightness)
self.send('GI:{},{}'.format(self.number, Util.int_to_hex_string(
brightness)))
|
"""GI on fast."""
import logging
from mpf.core.utility_functions import Util
from mpf.platforms.interfaces.gi_platform_interface import GIPlatformInterface
class FASTGIString(GIPlatformInterface):
"""A FAST GI string in a WPC machine."""
def __init__(self, number, sender):
"""Initialise GI string.
TODO: Need to implement the enable_relay and control which strings are
dimmable.
"""
self.log = logging.getLogger('FASTGIString.0x' + str(number))
self.number = number
self.send = sender
def off(self):
"""Turn off GI string."""
self.log.debug("Turning Off GI String")
self.send('GI:' + self.number + ',00')
def on(self, brightness=255):
"""Turn on GI string."""
if brightness >= 255:
brightness = 255
self.log.debug("Turning On GI String to brightness %s", brightness)
# self.send('GI:' + self.number + ',' + Util.int_to_hex_string(brightness))
self.send('GI:{},{}'.format(self.number,
Util.int_to_hex_string(brightness)))
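# Illustration only (not part of the module): a fake sender makes the wire
# format visible; '2A' is a made-up string number.
if __name__ == '__main__':
    gi = FASTGIString('2A', print)
    gi.on(128)  # prints GI:2A, followed by the brightness as two-digit hex (80)
    gi.off()    # prints GI:2A,00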
|
[
3,
4,
5,
6,
7
] |
2,178 |
93ec15a37bd5f022e8f6e226e3bf0e91cc0457c6
|
class Node:
<mask token>
class Solution(object):
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
if not root:
return []
if not root.children:
return [root.val]
result = []
for child in root.children:
result += self.postorder(child)
result += [root.val]
return result
<mask token>
|
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
if not root:
return []
if not root.children:
return [root.val]
result = []
for child in root.children:
result += self.postorder(child)
result += [root.val]
return result
<mask token>
|
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
if not root:
return []
if not root.children:
return [root.val]
result = []
for child in root.children:
result += self.postorder(child)
result += [root.val]
return result
<mask token>
print(result)
|
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
if not root:
return []
if not root.children:
return [root.val]
result = []
for child in root.children:
result += self.postorder(child)
result += [root.val]
return result
n5 = Node(5, None)
n6 = Node(6, None)
n3 = Node(2, None)
n4 = Node(4, None)
n2 = Node(3, [n5, n6])
n1 = Node(1, [n2, n3, n4])
s = Solution()
result = s.postorder(n1)
print(result)
|
# Definition for a Node.
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def postorder(self, root):
"""
:type root: Node
:rtype: List[int]
"""
if not root:
return([])
if not root.children:
return([root.val])
result = []
for child in root.children:
result += self.postorder(child)
result += [root.val]
return(result)
n5 = Node(5,None)
n6 = Node(6,None)
n3 = Node(2,None)
n4 = Node(4,None)
n2 = Node(3,[n5,n6])
n1 = Node(1,[n2,n3,n4])
s = Solution()
result = s.postorder(n1)
print(result)
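# Iterative alternative (sketch): the recursive version above can hit
# Python's recursion limit on deep trees; an explicit stack avoids that.
def postorder_iterative(root):
    if not root:
        return []
    stack, out = [root], []
    while stack:
        node = stack.pop()
        out.append(node.val)
        if node.children:
            stack.extend(node.children)
    return out[::-1]  # root first with children reversed; flip to get postorder
print(postorder_iterative(n1))  # matches the recursive result: [5, 6, 3, 2, 4, 1]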
|
[
3,
4,
5,
6,
7
] |
2,179 |
d4683d055ca70f31b050f0d84cb93c030feb4593
|
<mask token>
def twitter_authenticate():
return
<mask token>
def get_tweets():
return
|
<mask token>
def twitter_authenticate():
return
<mask token>
def remove_dupes():
return
def get_tweets():
return
|
<mask token>
def twitter_authenticate():
return
def identify_dupes():
return
def remove_dupes():
return
def get_tweets():
return
|
import twitter
def twitter_authenticate():
return
def identify_dupes():
return
def remove_dupes():
return
def get_tweets():
return
|
import twitter
def twitter_authenticate():
    return
def identify_dupes():
    return
def remove_dupes():
    return
def get_tweets():
    return
|
[
2,
3,
4,
5,
6
] |
2,180 |
445bb8ad8dadd207a3546f4623de583fc47a2910
|
<mask token>
|
<mask token>
random.shuffle(listaAlunos)
print('The presentation order will be ', listaAlunos)
|
<mask token>
aluno1 = input('First student: ')
aluno2 = input('Second student: ')
aluno3 = input('Third student: ')
aluno4 = input('Fourth student: ')
listaAlunos = [aluno1, aluno2, aluno3, aluno4]
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
|
import random
aluno1 = input('Primeiro aluno: ')
aluno2 = input('Segundo aluno: ')
aluno3 = input('Terceiro aluno: ')
aluno4 = input('Quarto aluno: ')
listaAlunos = [aluno1, aluno2, aluno3, aluno4]
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
|
# Python Exercise 20: The same teacher from challenge 19 wants to draw lots for the order in which students present their work. Write a program that reads the names of the four students and shows the drawn order.
import random
aluno1 = input('Primeiro aluno: ')
aluno2 = input('Segundo aluno: ')
aluno3 = input('Terceiro aluno: ')
aluno4 = input('Quarto aluno: ')
listaAlunos = [aluno1, aluno2, aluno3, aluno4]
# shuffle scrambles the items of the list in place
random.shuffle(listaAlunos)
print('A ordem de apresentação será ', listaAlunos)
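# For a reproducible draw, seed the generator first (e.g. random.seed(42)) before shuffling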
|
[
0,
1,
2,
3,
4
] |
2,181 |
74be250df785590ecf45e048b0d6189e2b445889
|
<mask token>
|
print('HELLO3')
|
print("HELLO3")
| null | null |
[
0,
1,
2
] |
2,182 |
351963bee76ecaa9fa5c8d659f6d7c6ca9b22531
|
<mask token>
|
<mask token>
urlpatterns = [path('signup/', views.signup, name='signup'), path('home',
views.home, name='home'), path('collab/', views.collab, name='collab')]
|
from django.urls import path
from django.conf.urls.i18n import urlpatterns
from . import views
urlpatterns = [path('signup/', views.signup, name='signup'), path('home',
views.home, name='home'), path('collab/', views.collab, name='collab')]
|
from django.urls import path
from django.conf.urls.i18n import urlpatterns
from . import views
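# NOTE: the 'urlpatterns' imported from django.conf.urls.i18n above is immediately shadowed by the list below, so that import appears unused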
urlpatterns = [
path('signup/', views.signup, name='signup'),
path('home', views.home, name='home'),
path('collab/', views.collab, name='collab'),
]
| null |
[
0,
1,
2,
3
] |
2,183 |
a05c94ae0ee41cfef5687f741e07a54ae793e40d
|
<mask token>
def get_session(db, usr, pwd):
"""Функция устанавливает соединение с ТД и возвращает сессию"""
if platform.system() == 'Windows':
driver = 'Teradata'
else:
driver = 'Teradata Database ODBC Driver 16.20'
udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',
logConsole=False)
session = udaExec.connect(method='odbc', system=db, username=usr,
password=pwd, driver=driver, charset='UTF8', autoCommit='True',
USEREGIONALSETTINGS='N', transactionMode='TERADATA')
return session
def sql2df(query, session, chunksize=100000):
""" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу """
db = pd.read_sql(query, session, chunksize=chunksize)
data = pd.DataFrame()
for x in tqdm(db):
data = pd.concat([data, x])
return data
def check_config():
""" .twbcfg.ini to root path """
path = os.path.expanduser('~')
config_path = os.path.join(path, '.twbcfg.ini')
log_path = os.path.join(path, 'tmp', 'teradata_logs')
if not os.path.exists(config_path):
if not os.path.exists(log_path):
os.mkdir(log_path)
config = (
f"CheckpointDirectory='{log_path}' \n LogDirectory='{log_path}' "
)
with open(config_path, 'w') as f:
f.write(config)
<mask token>
def py2td(x):
"""Функция вставляет пропуски и корректирует тип данных под ТД"""
x_type = type(x)
if x_type == float:
if x % 1 == 0:
return int(x)
else:
return x
elif x == 'null':
return None
else:
return x
<mask token>
|
<mask token>
def get_session(db, usr, pwd):
"""Функция устанавливает соединение с ТД и возвращает сессию"""
if platform.system() == 'Windows':
driver = 'Teradata'
else:
driver = 'Teradata Database ODBC Driver 16.20'
udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',
logConsole=False)
session = udaExec.connect(method='odbc', system=db, username=usr,
password=pwd, driver=driver, charset='UTF8', autoCommit='True',
USEREGIONALSETTINGS='N', transactionMode='TERADATA')
return session
def sql2df(query, session, chunksize=100000):
""" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу """
db = pd.read_sql(query, session, chunksize=chunksize)
data = pd.DataFrame()
for x in tqdm(db):
data = pd.concat([data, x])
return data
def check_config():
""" .twbcfg.ini to root path """
path = os.path.expanduser('~')
config_path = os.path.join(path, '.twbcfg.ini')
log_path = os.path.join(path, 'tmp', 'teradata_logs')
if not os.path.exists(config_path):
if not os.path.exists(log_path):
os.mkdir(log_path)
config = (
f"CheckpointDirectory='{log_path}' \n LogDirectory='{log_path}' "
)
with open(config_path, 'w') as f:
f.write(config)
<mask token>
def py2td(x):
"""Функция вставляет пропуски и корректирует тип данных под ТД"""
x_type = type(x)
if x_type == float:
if x % 1 == 0:
return int(x)
else:
return x
elif x == 'null':
return None
else:
return x
def td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=
'', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=
None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):
"""
    Writes data to Teradata via the load utilities or ODBC
"""
table = schema + '.' + tbl_name
if not fast:
if not loadframe:
            df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index_col=False)
n_iters = len(df) // batch_size + (len(df) % batch_size > 0)
df_dict = df.to_dict('records')
session = get_session(bd, username, password)
for i in tqdm(range(n_iters), total=n_iters):
session.executemany(
f"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})"
, [list(row.values()) for row in df_dict[i * batch_size:i *
batch_size + batch_size]], batch=True)
session.close()
else:
check_config()
local_seed = str(random.randint(0, 1000000))
path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +
local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
else:
os.mkdir(path_to_folder)
if loadframe:
converted = df.replace(np.NaN, '').astype(str)
path_to_file = path_to_folder + '/tmp.csv'
converted.to_csv(path_to_file, index=False, header=False, sep=
';', encoding='utf8')
converted_len = converted.apply(lambda x: x.str.encode('utf-8')
.apply(len)).max().to_dict()
else:
converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',
header=None, encoding='utf8', low_memory=False, nrows=100000)
columns_query = f'select * from {table} where 1=0'
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist(
)
session.close()
shutil.copy(path_to_file, path_to_folder + '/tmp.csv')
converted_len.columns = columns_names
converted_len = converted_len.apply(lambda x: x.str.encode(
'utf-8').apply(len)).max().to_dict()
td_temp_table = table + '_tmp_' + local_seed
session = get_session(bd, username, password)
session.execute(
f'create multiset table {td_temp_table} as {table} with no data no primary index'
)
session.close()
txt = f"""USING CHARACTER SET UTF8
DEFINE JOB teradata_upload
Description 'Fastload script'
(
DEFINE OPERATOR Load_operator
TYPE LOAD
SCHEMA *
ATTRIBUTES
(
VARCHAR TdPid='{bd}',
VARCHAR UserName='{username}',
VARCHAR UserPassWord='{password}',
VARCHAR TargetTable='{td_temp_table}',
VARCHAR LogTable='{schema}.usr_tpt_log',
VARCHAR DateForm='AnsiDate',
INTEGER MaxSessions={max_sessions}
);
DEFINE SCHEMA Define_Employee_Schema
(
{','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())}
);
DEFINE OPERATOR Producer_File_Detail
TYPE DATACONNECTOR PRODUCER
SCHEMA Define_Employee_Schema
ATTRIBUTES
(
VARCHAR DirectoryPath='{path_to_folder}/'
, VARCHAR FileName='tmp.csv'
, VARCHAR TextDelimiter=';'
, VARCHAR QuotedData = 'Optional'
, VARCHAR OpenQuoteMark = '"'
, VARCHAR CloseQuoteMark = '"'
, VARCHAR Format='Delimited'
, VARCHAR OpenMode='Read'
, VARCHAR INDICATORMODE='N'
, INTEGER BUFFERSIZE = {buffersize}
);
APPLY
(
'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'
)
TO OPERATOR(Load_operator)
SELECT * FROM OPERATOR (Producer_File_Detail);
);"""
with open(path_to_folder + '/load_code.tpt', 'w+') as f:
f.write(txt)
p = subprocess.Popen(shlex.split(
f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))
p.wait()
print('Merging in Teradata... \r', end='', flush=True)
session = get_session(bd, username, password)
session.execute(f'insert into {table} sel * from {td_temp_table}')
session.close()
print('Cleaning... \r', end='', flush=True)
session = get_session(bd, username, password)
session.execute(f'drop table {td_temp_table}')
session.close()
shutil.rmtree(path_to_folder)
print('Done!')
|
<mask token>
def get_session(db, usr, pwd):
"""Функция устанавливает соединение с ТД и возвращает сессию"""
if platform.system() == 'Windows':
driver = 'Teradata'
else:
driver = 'Teradata Database ODBC Driver 16.20'
udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',
logConsole=False)
session = udaExec.connect(method='odbc', system=db, username=usr,
password=pwd, driver=driver, charset='UTF8', autoCommit='True',
USEREGIONALSETTINGS='N', transactionMode='TERADATA')
return session
def sql2df(query, session, chunksize=100000):
""" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу """
db = pd.read_sql(query, session, chunksize=chunksize)
data = pd.DataFrame()
for x in tqdm(db):
data = pd.concat([data, x])
return data
def check_config():
""" .twbcfg.ini to root path """
path = os.path.expanduser('~')
config_path = os.path.join(path, '.twbcfg.ini')
log_path = os.path.join(path, 'tmp', 'teradata_logs')
if not os.path.exists(config_path):
if not os.path.exists(log_path):
os.mkdir(log_path)
config = (
f"CheckpointDirectory='{log_path}' \n LogDirectory='{log_path}' "
)
with open(config_path, 'w') as f:
f.write(config)
def td_download(query='', bd='tdsb15.cgs.sbrf.ru', username='', password='',
fast=False, return_df=False, csv=True, chunksize=100000):
"""
    Returns data from Teradata: either a path to a csv file or a dataframe.
    fast=True - use the Teradata utilities, False - ODBC;
    return_df - return a dataframe;
    csv - write the data to a file when fast=False;
    chunksize - batch size for ODBC;
    the query must contain a where clause so the column names can be fetched from the DB
"""
local_seed = str(random.randint(0, 1000000))
query = query.replace('\n', ' ')
if not fast:
session = get_session(bd, username, password)
frame = sql2df(query, session, chunksize=chunksize)
session.close()
if return_df:
return frame
else:
path_to_file = os.path.join(os.getcwd(), 'data', 'input_' +
local_seed)
if csv:
filename = path_to_file + '.csv'
frame.to_csv(filename, sep=';', index=False, encoding='utf8')
return filename
else:
dump(frame, path_to_file)
return path_to_file
else:
check_config()
query = query.replace("'", "''")
path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' +
local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
os.mkdir(path_to_folder)
else:
os.mkdir(path_to_folder)
path_to_file = os.path.join(path_to_folder, 'dataset.csv')
open(path_to_file, 'w').close()
txt = (
"""SourceTdpId = '%s'
,SourceUserName = '%s'
,SourceUserPassword = '%s'
,DDLPrivateLogName = 'ddlprivate.log'
,ExportPrivateLogName = 'exportprivate.log'
,TargetErrorList = ['3807']
,TargetFileName = '%s'
,TargetFormat = 'delimited'
,TargetTextDelimiter = ';'
,TargetOpenMode = 'write'
,SelectStmt = '%s' """
% (bd, username, password, path_to_file, query))
qtxt = """USING CHAR SET UTF-8
DEFINE JOB qstart2
(
APPLY TO OPERATOR ($FILE_WRITER)
SELECT * FROM OPERATOR($EXPORT);
);"""
with open(path_to_folder + '/qstart2.txt', 'w+') as f:
f.write(qtxt)
with open(path_to_folder + '/jobvars.txt', 'w+') as f:
f.write(txt)
p = subprocess.run(shlex.split(
            f'tbuild -f {path_to_folder}/qstart2.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}'
), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
query = query.replace('\n', ' ').replace("''", "'")
query = query.lower()
query_list = query.split('where')
if len(query_list) == 2:
columns_query = ' where 1=0 and '.join(query_list)
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist(
)
session.close()
else:
print("Coudn't load columns names")
columns_names = None
if not return_df:
if columns_names:
with open(path_to_folder + '/columns_names.txt', 'w') as f:
f.write('\n'.join(columns_names))
return path_to_file
else:
if columns_names:
frame = pd.read_csv(path_to_file, names=columns_names,
delimiter=';')
else:
frame = pd.read_csv(path_to_file, header=None, delimiter=';')
return frame
def py2td(x):
"""Функция вставляет пропуски и корректирует тип данных под ТД"""
x_type = type(x)
if x_type == float:
if x % 1 == 0:
return int(x)
else:
return x
elif x == 'null':
return None
else:
return x
def td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=
'', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=
None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):
"""
    Writes data to Teradata via the load utilities or ODBC
"""
table = schema + '.' + tbl_name
if not fast:
if not loadframe:
            df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index_col=False)
n_iters = len(df) // batch_size + (len(df) % batch_size > 0)
df_dict = df.to_dict('records')
session = get_session(bd, username, password)
for i in tqdm(range(n_iters), total=n_iters):
session.executemany(
f"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})"
, [list(row.values()) for row in df_dict[i * batch_size:i *
batch_size + batch_size]], batch=True)
session.close()
else:
check_config()
local_seed = str(random.randint(0, 1000000))
path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +
local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
else:
os.mkdir(path_to_folder)
if loadframe:
converted = df.replace(np.NaN, '').astype(str)
path_to_file = path_to_folder + '/tmp.csv'
converted.to_csv(path_to_file, index=False, header=False, sep=
';', encoding='utf8')
converted_len = converted.apply(lambda x: x.str.encode('utf-8')
.apply(len)).max().to_dict()
else:
converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',
header=None, encoding='utf8', low_memory=False, nrows=100000)
columns_query = f'select * from {table} where 1=0'
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist(
)
session.close()
shutil.copy(path_to_file, path_to_folder + '/tmp.csv')
converted_len.columns = columns_names
converted_len = converted_len.apply(lambda x: x.str.encode(
'utf-8').apply(len)).max().to_dict()
td_temp_table = table + '_tmp_' + local_seed
session = get_session(bd, username, password)
session.execute(
f'create multiset table {td_temp_table} as {table} with no data no primary index'
)
session.close()
txt = f"""USING CHARACTER SET UTF8
DEFINE JOB teradata_upload
Description 'Fastload script'
(
DEFINE OPERATOR Load_operator
TYPE LOAD
SCHEMA *
ATTRIBUTES
(
VARCHAR TdPid='{bd}',
VARCHAR UserName='{username}',
VARCHAR UserPassWord='{password}',
VARCHAR TargetTable='{td_temp_table}',
VARCHAR LogTable='{schema}.usr_tpt_log',
VARCHAR DateForm='AnsiDate',
INTEGER MaxSessions={max_sessions}
);
DEFINE SCHEMA Define_Employee_Schema
(
{','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())}
);
DEFINE OPERATOR Producer_File_Detail
TYPE DATACONNECTOR PRODUCER
SCHEMA Define_Employee_Schema
ATTRIBUTES
(
VARCHAR DirectoryPath='{path_to_folder}/'
, VARCHAR FileName='tmp.csv'
, VARCHAR TextDelimiter=';'
, VARCHAR QuotedData = 'Optional'
, VARCHAR OpenQuoteMark = '"'
, VARCHAR CloseQuoteMark = '"'
, VARCHAR Format='Delimited'
, VARCHAR OpenMode='Read'
, VARCHAR INDICATORMODE='N'
, INTEGER BUFFERSIZE = {buffersize}
);
APPLY
(
'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'
)
TO OPERATOR(Load_operator)
SELECT * FROM OPERATOR (Producer_File_Detail);
);"""
with open(path_to_folder + '/load_code.tpt', 'w+') as f:
f.write(txt)
p = subprocess.Popen(shlex.split(
f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))
p.wait()
print('Merging in Teradata... \r', end='', flush=True)
session = get_session(bd, username, password)
session.execute(f'insert into {table} sel * from {td_temp_table}')
session.close()
print('Cleaning... \r', end='', flush=True)
session = get_session(bd, username, password)
session.execute(f'drop table {td_temp_table}')
session.close()
shutil.rmtree(path_to_folder)
print('Done!')
|
import os
import numpy as np
import pandas as pd
import random
import platform
import subprocess
import shlex
import teradata
from joblib import dump
import shutil
from tqdm import tqdm
def get_session(db, usr, pwd):
"""Функция устанавливает соединение с ТД и возвращает сессию"""
if platform.system() == 'Windows':
driver = 'Teradata'
else:
driver = 'Teradata Database ODBC Driver 16.20'
udaExec = teradata.UdaExec(appName='DataLoad', version='0.1',
logConsole=False)
session = udaExec.connect(method='odbc', system=db, username=usr,
password=pwd, driver=driver, charset='UTF8', autoCommit='True',
USEREGIONALSETTINGS='N', transactionMode='TERADATA')
return session
def sql2df(query, session, chunksize=100000):
""" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу """
db = pd.read_sql(query, session, chunksize=chunksize)
data = pd.DataFrame()
for x in tqdm(db):
data = pd.concat([data, x])
return data
def check_config():
""" .twbcfg.ini to root path """
path = os.path.expanduser('~')
config_path = os.path.join(path, '.twbcfg.ini')
log_path = os.path.join(path, 'tmp', 'teradata_logs')
if not os.path.exists(config_path):
if not os.path.exists(log_path):
os.mkdir(log_path)
config = (
f"CheckpointDirectory='{log_path}' \n LogDirectory='{log_path}' "
)
with open(config_path, 'w') as f:
f.write(config)
def td_download(query='', bd='tdsb15.cgs.sbrf.ru', username='', password='',
fast=False, return_df=False, csv=True, chunksize=100000):
"""
    Returns data from Teradata: either a path to a csv file or a dataframe.
    fast=True - use the Teradata utilities, False - ODBC;
    return_df - return a dataframe;
    csv - write the data to a file when fast=False;
    chunksize - batch size for ODBC;
    the query must contain a where clause so the column names can be fetched from the DB
"""
local_seed = str(random.randint(0, 1000000))
query = query.replace('\n', ' ')
if not fast:
session = get_session(bd, username, password)
frame = sql2df(query, session, chunksize=chunksize)
session.close()
if return_df:
return frame
else:
path_to_file = os.path.join(os.getcwd(), 'data', 'input_' +
local_seed)
if csv:
filename = path_to_file + '.csv'
frame.to_csv(filename, sep=';', index=False, encoding='utf8')
return filename
else:
dump(frame, path_to_file)
return path_to_file
else:
check_config()
query = query.replace("'", "''")
path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' +
local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
os.mkdir(path_to_folder)
else:
os.mkdir(path_to_folder)
path_to_file = os.path.join(path_to_folder, 'dataset.csv')
open(path_to_file, 'w').close()
txt = (
"""SourceTdpId = '%s'
,SourceUserName = '%s'
,SourceUserPassword = '%s'
,DDLPrivateLogName = 'ddlprivate.log'
,ExportPrivateLogName = 'exportprivate.log'
,TargetErrorList = ['3807']
,TargetFileName = '%s'
,TargetFormat = 'delimited'
,TargetTextDelimiter = ';'
,TargetOpenMode = 'write'
,SelectStmt = '%s' """
% (bd, username, password, path_to_file, query))
qtxt = """USING CHAR SET UTF-8
DEFINE JOB qstart2
(
APPLY TO OPERATOR ($FILE_WRITER)
SELECT * FROM OPERATOR($EXPORT);
);"""
with open(path_to_folder + '/qstart2.txt', 'w+') as f:
f.write(qtxt)
with open(path_to_folder + '/jobvars.txt', 'w+') as f:
f.write(txt)
p = subprocess.run(shlex.split(
            f'tbuild -f {path_to_folder}/qstart2.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}'
), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
query = query.replace('\n', ' ').replace("''", "'")
query = query.lower()
query_list = query.split('where')
if len(query_list) == 2:
columns_query = ' where 1=0 and '.join(query_list)
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist(
)
session.close()
else:
print("Coudn't load columns names")
columns_names = None
if not return_df:
if columns_names:
with open(path_to_folder + '/columns_names.txt', 'w') as f:
f.write('\n'.join(columns_names))
return path_to_file
else:
if columns_names:
frame = pd.read_csv(path_to_file, names=columns_names,
delimiter=';')
else:
frame = pd.read_csv(path_to_file, header=None, delimiter=';')
return frame
def py2td(x):
"""Функция вставляет пропуски и корректирует тип данных под ТД"""
x_type = type(x)
if x_type == float:
if x % 1 == 0:
return int(x)
else:
return x
elif x == 'null':
return None
else:
return x
def td_import(username='', password='', bd='tdsb15.cgs.sbrf.ru', tbl_name=
'', schema='SBX_RETAIL_MP_PFM', loadframe=True, df=None, path_to_file=
None, fast=False, batch_size=12000, max_sessions=6, buffersize=524288):
"""
    Writes data to Teradata via the load utilities or ODBC
"""
table = schema + '.' + tbl_name
if not fast:
if not loadframe:
            df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index_col=False)
n_iters = len(df) // batch_size + (len(df) % batch_size > 0)
df_dict = df.to_dict('records')
session = get_session(bd, username, password)
for i in tqdm(range(n_iters), total=n_iters):
session.executemany(
f"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})"
, [list(row.values()) for row in df_dict[i * batch_size:i *
batch_size + batch_size]], batch=True)
session.close()
else:
check_config()
local_seed = str(random.randint(0, 1000000))
path_to_folder = os.path.join(os.getcwd(), 'data', 'output_' +
local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
else:
os.mkdir(path_to_folder)
if loadframe:
converted = df.replace(np.NaN, '').astype(str)
path_to_file = path_to_folder + '/tmp.csv'
converted.to_csv(path_to_file, index=False, header=False, sep=
';', encoding='utf8')
converted_len = converted.apply(lambda x: x.str.encode('utf-8')
.apply(len)).max().to_dict()
else:
converted_len = pd.read_csv(path_to_file, sep=';', dtype='str',
header=None, encoding='utf8', low_memory=False, nrows=100000)
columns_query = f'select * from {table} where 1=0'
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist(
)
session.close()
shutil.copy(path_to_file, path_to_folder + '/tmp.csv')
converted_len.columns = columns_names
converted_len = converted_len.apply(lambda x: x.str.encode(
'utf-8').apply(len)).max().to_dict()
td_temp_table = table + '_tmp_' + local_seed
session = get_session(bd, username, password)
session.execute(
f'create multiset table {td_temp_table} as {table} with no data no primary index'
)
session.close()
txt = f"""USING CHARACTER SET UTF8
DEFINE JOB teradata_upload
Description 'Fastload script'
(
DEFINE OPERATOR Load_operator
TYPE LOAD
SCHEMA *
ATTRIBUTES
(
VARCHAR TdPid='{bd}',
VARCHAR UserName='{username}',
VARCHAR UserPassWord='{password}',
VARCHAR TargetTable='{td_temp_table}',
VARCHAR LogTable='{schema}.usr_tpt_log',
VARCHAR DateForm='AnsiDate',
INTEGER MaxSessions={max_sessions}
);
DEFINE SCHEMA Define_Employee_Schema
(
{','.join(f'{key} VARCHAR({max(1, value * 2)})' for key, value in converted_len.items())}
);
DEFINE OPERATOR Producer_File_Detail
TYPE DATACONNECTOR PRODUCER
SCHEMA Define_Employee_Schema
ATTRIBUTES
(
VARCHAR DirectoryPath='{path_to_folder}/'
, VARCHAR FileName='tmp.csv'
, VARCHAR TextDelimiter=';'
, VARCHAR QuotedData = 'Optional'
, VARCHAR OpenQuoteMark = '"'
, VARCHAR CloseQuoteMark = '"'
, VARCHAR Format='Delimited'
, VARCHAR OpenMode='Read'
, VARCHAR INDICATORMODE='N'
, INTEGER BUFFERSIZE = {buffersize}
);
APPLY
(
'INSERT INTO {td_temp_table}({','.join(f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(f'{key}' for key, value in converted_len.items())});'
)
TO OPERATOR(Load_operator)
SELECT * FROM OPERATOR (Producer_File_Detail);
);"""
with open(path_to_folder + '/load_code.tpt', 'w+') as f:
f.write(txt)
p = subprocess.Popen(shlex.split(
f'tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}'))
p.wait()
print('Merging in Teradata... \r', end='', flush=True)
session = get_session(bd, username, password)
session.execute(f'insert into {table} sel * from {td_temp_table}')
session.close()
print('Cleaning... \r', end='', flush=True)
session = get_session(bd, username, password)
session.execute(f'drop table {td_temp_table}')
session.close()
shutil.rmtree(path_to_folder)
print('Done!')
|
import os
import numpy as np
import pandas as pd
import random
import platform
import subprocess
import shlex
import teradata
from joblib import dump
import shutil
from tqdm import tqdm
def get_session(db, usr, pwd):
"""Функция устанавливает соединение с ТД и возвращает сессию"""
if platform.system() == 'Windows':
driver = 'Teradata'
else:
driver = 'Teradata Database ODBC Driver 16.20'
udaExec = teradata.UdaExec(appName='DataLoad', version='0.1', logConsole=False)
session = udaExec.connect(method='odbc',
                              system=db, # TD server (from a config file)
                              username=usr, # TD login
                              password=pwd, # TD password
driver = driver,
charset='UTF8',
autoCommit='True',
USEREGIONALSETTINGS='N',
transactionMode = 'TERADATA'
)
return session
def sql2df(query, session, chunksize=100000):
""" Функция грузит из терадаты данные в батчах по 100к и склеивает их в одну таблицу """
db = pd.read_sql(query, session, chunksize=chunksize)
data = pd.DataFrame()
for x in tqdm(db):
data = pd.concat([data, x])
return data
def check_config():
""" .twbcfg.ini to root path """
path = os.path.expanduser("~")
config_path = os.path.join(path, ".twbcfg.ini")
log_path = os.path.join(path, "tmp", "teradata_logs")
if not os.path.exists(config_path):
if not os.path.exists(log_path):
os.mkdir(log_path)
config = f'''CheckpointDirectory='{log_path}'
LogDirectory='{log_path}' '''
with open(config_path, 'w') as f:
f.write(config)
def td_download(query="",
bd="tdsb15.cgs.sbrf.ru",
username="", password="",
fast=False, return_df=False, csv=True,
chunksize=100000):
"""
    Returns data from Teradata: either a path to a csv file or a dataframe.
    fast=True - use the Teradata utilities, False - ODBC;
    return_df - return a dataframe;
    csv - write the data to a file when fast=False;
    chunksize - batch size for ODBC;
    the query must contain a where clause so the column names can be fetched from the DB
"""
local_seed = str(random.randint(0, 1000000))
query = query.replace("\n", " ")
if not fast:
# Teradata python package
session = get_session(bd, username, password)
frame = sql2df(query, session, chunksize=chunksize)
session.close()
if return_df:
return frame
else:
path_to_file = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)
if csv:
filename = path_to_file + ".csv"
frame.to_csv(filename, sep=';', index=False, encoding="utf8")
return filename
else:
dump(frame, path_to_file)
return path_to_file
else:
# FastLoad
check_config()
query = query.replace("'", "''") # prepair query for FastLoad
path_to_folder = os.path.join(os.getcwd(), 'data', 'input_' + local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
os.mkdir(path_to_folder)
else:
os.mkdir(path_to_folder)
path_to_file = os.path.join(path_to_folder, 'dataset.csv')
open(path_to_file, 'w').close()
# Create utility files
txt = '''SourceTdpId = '%s'
,SourceUserName = '%s'
,SourceUserPassword = '%s'
,DDLPrivateLogName = 'ddlprivate.log'
,ExportPrivateLogName = 'exportprivate.log'
,TargetErrorList = ['3807']
,TargetFileName = '%s'
,TargetFormat = 'delimited'
,TargetTextDelimiter = ';'
,TargetOpenMode = 'write'
,SelectStmt = '%s' ''' % (bd, username, password, path_to_file, query)
qtxt = '''USING CHAR SET UTF-8
DEFINE JOB qstart2
(
APPLY TO OPERATOR ($FILE_WRITER)
SELECT * FROM OPERATOR($EXPORT);
);'''
with open(path_to_folder + '/qstart2.txt', 'w+') as f:
f.write(qtxt)
with open(path_to_folder + '/jobvars.txt', 'w+') as f:
f.write(txt)
# run FastLoad
# p = subprocess.Popen(
# shlex.split(f"tbuild -f {path_to_folder}/qstart2.txt -v {path_to_folder}/jobvars.txt -j qstart2")
# )
# p.wait()
p = subprocess.run(
shlex.split(f"tbuild -f {path_to_folder}/tdd.txt -v {path_to_folder}/jobvars.txt -j tdd_{str(local_seed)}"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
        # column names
query = query.replace("\n", " ").replace("''","'")
query = query.lower()
query_list = query.split("where")
if len(query_list) == 2:
columns_query = " where 1=0 and ".join(query_list)
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist()
session.close()
else:
print("Coudn't load columns names")
columns_names = None
if not return_df:
if columns_names:
with open(path_to_folder + '/columns_names.txt', 'w') as f:
f.write("\n".join(columns_names))
return path_to_file
else:
if columns_names:
frame = pd.read_csv(path_to_file, names=columns_names, delimiter=';')
else:
frame = pd.read_csv(path_to_file, header=None, delimiter=';')
return frame
def py2td(x):
"""Функция вставляет пропуски и корректирует тип данных под ТД"""
x_type = type(x)
if x_type == float:
if x % 1 == 0:
return int(x)
else:
return x
elif x == 'null':
return None
else:
return x
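# e.g. py2td(3.0) -> 3, py2td(2.5) -> 2.5, py2td('null') -> None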
def td_import(
username="", password="",
bd="tdsb15.cgs.sbrf.ru", tbl_name="",
schema="SBX_RETAIL_MP_PFM",
loadframe=True, df=None, path_to_file=None, fast=False,
batch_size=12000, max_sessions=6, buffersize=524288,
):
"""
    Writes data to Teradata via the load utilities or ODBC
"""
table = schema + "." + tbl_name
if not fast:
if not loadframe:
            df = pd.read_csv(path_to_file, sep=';', encoding='utf8', index_col=False)
# insert
n_iters = len(df) // batch_size + (len(df) % batch_size > 0)
df_dict = df.to_dict('records')
session = get_session(bd, username, password)
for i in tqdm(range(n_iters), total=n_iters):
session.executemany(
f"INSERT INTO {table} VALUES ({','.join(list('?' * df.shape[1]))})",
[list(row.values()) for row in df_dict[i * batch_size:i * batch_size + batch_size]],
batch=True
)
session.close()
else:
check_config()
local_seed = str(random.randint(0, 1000000))
path_to_folder = os.path.join(os.getcwd(), "data", "output_" + local_seed)
if os.path.exists(path_to_folder):
shutil.rmtree(path_to_folder)
else:
os.mkdir(path_to_folder)
if loadframe:
converted = df.replace(np.NaN, '').astype(str)
path_to_file = path_to_folder + '/tmp.csv'
converted.to_csv(path_to_file, index=False, header=False, sep=";", encoding="utf8")
converted_len = converted.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()
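            # converted_len maps each column to its max utf-8 byte length; the TPT schema below sizes each VARCHAR at twice that (minimum 1)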
else:
converted_len = pd.read_csv(path_to_file, sep=';', dtype="str", header=None, encoding="utf8",
low_memory=False, nrows=100000)
columns_query = f"select * from {table} where 1=0"
session = get_session(bd, username, password)
columns_names = pd.read_sql(columns_query, session).columns.tolist()
session.close()
            shutil.copy(path_to_file, path_to_folder + "/tmp.csv") # copy the file into the job folder (could be changed to a move)
converted_len.columns = columns_names
converted_len = converted_len.apply(lambda x: x.str.encode('utf-8').apply(len)).max().to_dict()
# create empty tmp table
td_temp_table = table + "_tmp_" + local_seed # change schema
session = get_session(bd, username, password)
session.execute(
f"create multiset table {td_temp_table} as {table} with no data no primary index"
)
session.close()
# Create utility file
txt = f"""USING CHARACTER SET UTF8
DEFINE JOB teradata_upload
Description 'Fastload script'
(
DEFINE OPERATOR Load_operator
TYPE LOAD
SCHEMA *
ATTRIBUTES
(
VARCHAR TdPid='{bd}',
VARCHAR UserName='{username}',
VARCHAR UserPassWord='{password}',
VARCHAR TargetTable='{td_temp_table}',
VARCHAR LogTable='{schema}.usr_tpt_log',
VARCHAR DateForm='AnsiDate',
INTEGER MaxSessions={max_sessions}
);
DEFINE SCHEMA Define_Employee_Schema
(
{','.join(f'{key} VARCHAR({max(1, value*2)})' for key, value in converted_len.items())}
);
DEFINE OPERATOR Producer_File_Detail
TYPE DATACONNECTOR PRODUCER
SCHEMA Define_Employee_Schema
ATTRIBUTES
(
VARCHAR DirectoryPath='{path_to_folder}/'
, VARCHAR FileName='tmp.csv'
, VARCHAR TextDelimiter=';'
, VARCHAR QuotedData = 'Optional'
, VARCHAR OpenQuoteMark = '"'
, VARCHAR CloseQuoteMark = '"'
, VARCHAR Format='Delimited'
, VARCHAR OpenMode='Read'
, VARCHAR INDICATORMODE='N'
, INTEGER BUFFERSIZE = {buffersize}
);
APPLY
(
'INSERT INTO {td_temp_table}({','.join(
f'{key}' for key, value in converted_len.items())}) VALUES (:{',:'.join(
f'{key}' for key, value in converted_len.items())});'
)
TO OPERATOR(Load_operator)
SELECT * FROM OPERATOR (Producer_File_Detail);
);"""
with open(path_to_folder + '/load_code.tpt', 'w+') as f:
f.write(txt)
# Start TPT load
p = subprocess.Popen(
shlex.split(f"tbuild -f {path_to_folder}/load_code.tpt -L {path_to_folder}")
)
p.wait()
# Merge
print("Merging in Teradata... \r", end='', flush=True)
session = get_session(bd, username, password)
session.execute(f"insert into {table} sel * from {td_temp_table}")
session.close()
# Drop temporary table
print("Cleaning... \r", end='', flush=True)
session = get_session(bd, username, password)
session.execute(f"drop table {td_temp_table}")
session.close()
# Cleanup
shutil.rmtree(path_to_folder)
print("Done!")
|
[
4,
5,
6,
7,
8
] |
2,184 |
30b07e57737ac29643769c4773591199b2ba8656
|
<mask token>
def groupby_count(df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop=True, inplace=True)
return new_df
<mask token>
def process_num(num):
return float(re.sub('[^\\w\\s.]', '', num))
<mask token>
|
<mask token>
def groupby_count(df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop=True, inplace=True)
return new_df
<mask token>
def process_num(num):
return float(re.sub('[^\\w\\s.]', '', num))
<mask token>
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
<mask token>
clean_TMDB_movies.dropna(inplace=True)
<mask token>
movies_discretized['percent_profit']
<mask token>
movies_discretized.drop(columns=['day', 'release_date'], inplace=True)
<mask token>
for movie in movies_discretized['production_companies']:
if 'Universal' in movie:
production_company.append('Universal')
elif 'Sony' in movie:
production_company.append('Sony')
elif 'Fox' in movie:
production_company.append('Fox')
elif 'DreamWorks' in movie:
production_company.append('DW')
elif 'MGM' in movie:
production_company.append('MGM')
elif 'Paramount' in movie:
production_company.append('Paramount')
elif 'Disney' in movie:
production_company.append('Disney')
elif 'Warner Bros' in movie:
production_company.append('WB')
else:
production_company.append('None')
<mask token>
movies_discretized_count_df.drop(['counts', 'production_company_count'],
axis=1, inplace=True)
<mask token>
movies_discretized_count_df_week.drop(['counts', 'week_count'], axis=1,
inplace=True)
<mask token>
clean_IMDb.dropna(inplace=True)
<mask token>
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace=True)
<mask token>
print(revenue_covid)
<mask token>
print(AMC_revenue.head())
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
|
<mask token>
def groupby_count(df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop=True, inplace=True)
return new_df
url = 'https://en.wikipedia.org/wiki/Film_series'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table')
def process_num(num):
return float(re.sub('[^\\w\\s.]', '', num))
num1 = float(re.sub('[^\\w\\s.]', '', '1,156.30'))
gross = []
year = []
film = []
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
movie_df = pd.DataFrame({'Gross': gross, 'first': year, 'Franchise': film})
movies_TMDB_kaggle = pd.read_csv(
'C:/Users/lewis/OneDrive/Documents/MovieData/tmdb_5000_movies.csv',
encoding='ISO-8859-1')
clean_TMDB_movies = movies_TMDB_kaggle.drop(columns=['homepage', 'id',
'overview', 'status', 'tagline', 'original_title'])
clean_TMDB_movies.dropna(inplace=True)
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['budget'] != 0]
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['revenue'] != 0]
clean_TMDB_movies['profit'] = clean_TMDB_movies['revenue'] - clean_TMDB_movies[
'budget']
clean_TMDB_movies['percent_profit'] = clean_TMDB_movies['profit'
] / clean_TMDB_movies['budget'] * 100
clean_TMDB_movies['release_date'] = pd.to_datetime(clean_TMDB_movies[
'release_date'])
clean_TMDB_movies['month'], clean_TMDB_movies['day'] = clean_TMDB_movies[
'release_date'].dt.month, clean_TMDB_movies['release_date'].dt.day
cat = list(range(1, 13))
clean_TMDB_movies['month'] = pd.Categorical(clean_TMDB_movies['month'],
ordered=True, categories=cat)
categories = ['very_low', 'low', 'high', 'very_high']
movies_discretized = clean_TMDB_movies
movies_discretized['budget'] = pd.cut(movies_discretized['budget'], [0,
13000000, 30000000, 62192550, 400000000], labels=categories)
movies_discretized['revenue'] = pd.cut(movies_discretized['revenue'], [0,
21458200, 62954020, 187976900, 2887965000], labels=categories)
categories_profit = ['negative', 'low', 'high', 'very_high']
movies_discretized['profit'] = pd.cut(movies_discretized['profit'], [-
165710100, 0, 29314900, 140784100, 2560965000], labels=categories_profit)
movies_discretized['vote_average'] = pd.cut(movies_discretized[
'vote_average'], [0, 6, 6.5, 7, 8.5], labels=categories)
movies_discretized['vote_count'] = pd.cut(movies_discretized['vote_count'],
[0, 440, 1151, 2522, 14000], labels=categories)
movies_discretized['percent_profit'] = pd.cut(movies_discretized[
'percent_profit'], [-100, 0, 108, 436, 6528], labels=categories_profit)
movies_discretized['percent_profit']
categories_weeks = ['week_1', 'week_2', 'week_3', 'week_4']
movies_discretized['week'] = pd.cut(movies_discretized['day'], [0, 8, 15,
22, 32], labels=categories_weeks)
movies_discretized.drop(columns=['day', 'release_date'], inplace=True)
production_company = []
for movie in movies_discretized['production_companies']:
if 'Universal' in movie:
production_company.append('Universal')
elif 'Sony' in movie:
production_company.append('Sony')
elif 'Fox' in movie:
production_company.append('Fox')
elif 'DreamWorks' in movie:
production_company.append('DW')
elif 'MGM' in movie:
production_company.append('MGM')
elif 'Paramount' in movie:
production_company.append('Paramount')
elif 'Disney' in movie:
production_company.append('Disney')
elif 'Warner Bros' in movie:
production_company.append('WB')
else:
production_company.append('None')
movies_discretized['main_production'] = production_company
movies_discretized_count = movies_discretized.groupby(['main_production',
'percent_profit'])['main_production'].count()
movies_discretized_count_df = pd.DataFrame(movies_discretized_count)
movies_discretized_count_df.columns = ['counts']
movies_discretized_count_df['production_company'
] = movies_discretized_count_df.index.get_level_values(0)
movies_discretized_count_df['percent_profit_category'
] = movies_discretized_count_df.index.get_level_values(1)
movies_discretized_count_df = movies_discretized_count_df.reset_index(drop=True
)
production_company_discretized_count_df = movies_discretized_count_df.groupby([
'production_company'])['counts'].sum()
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company']
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'DW'], 82)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Disney'], 116)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Fox'], 298)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'MGM'], 87)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'None'], 1782)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Paramount'], 235)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Sony'], 42)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Universal'], 282)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'WB'], 269)
movies_discretized_count_df['percent'] = movies_discretized_count_df['counts'
] / movies_discretized_count_df['production_company_count'] * 100
movies_discretized_count_df.drop(['counts', 'production_company_count'],
axis=1, inplace=True)
movies_discretized_count_week = movies_discretized.groupby(['week',
'percent_profit'])['week'].count()
movies_discretized_count_df_week = pd.DataFrame(movies_discretized_count_week)
movies_discretized_count_df_week.columns = ['counts']
movies_discretized_count_df_week['week'
] = movies_discretized_count_df_week.index.get_level_values(0)
movies_discretized_count_df_week['percent_profit_category'
] = movies_discretized_count_df_week.index.get_level_values(1)
movies_discretized_count_df_week = (movies_discretized_count_df_week.
reset_index(drop=True))
sum_discretized_count_df_week = movies_discretized_count_df_week.groupby([
'week'])['counts'].sum()
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week']
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_1'], 783)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_2'], 817)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_3'], 782)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_4'], 811)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].astype(np.int64)
movies_discretized_count_df_week['percent'] = movies_discretized_count_df_week[
'counts'] / movies_discretized_count_df_week['week_count'] * 100
movies_discretized_count_df_week.drop(['counts', 'week_count'], axis=1,
inplace=True)
movies_IMDb = pd.read_csv(
'C:/Users/lewis/OneDrive/Documents/MovieData/IMDb_movies.csv')
clean_IMDb = movies_IMDb.drop(columns=['imdb_title_id', 'original_title',
'description', 'reviews_from_users', 'reviews_from_critics'])
clean_IMDb.dropna(inplace=True)
IMDb_movies_genre = groupby_count(clean_IMDb, 'genre', 'genre')
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace=True)
revenue_covid = pd.read_csv(
'C:/Users/lewis/OneDrive/Documents/MovieData/revenue_covid_impact.csv')
print(revenue_covid)
AMC_revenue = pd.read_csv('C:/Users/lewis/OneDrive/Documents/MovieData/AMC.csv'
)
print(AMC_revenue.head())
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
|
<mask token>
import csv
import pandas as pd
import re
import statistics
import matplotlib.pyplot as plt
import numpy as np
from bs4 import BeautifulSoup
from urllib.request import urlopen
def groupby_count(df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop=True, inplace=True)
return new_df
url = 'https://en.wikipedia.org/wiki/Film_series'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table')
def process_num(num):
return float(re.sub('[^\\w\\s.]', '', num))
num1 = float(re.sub('[^\\w\\s.]', '', '1,156.30'))
gross = []
year = []
film = []
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
movie_df = pd.DataFrame({'Gross': gross, 'first': year, 'Franchise': film})
movies_TMDB_kaggle = pd.read_csv(
'C:/Users/lewis/OneDrive/Documents/MovieData/tmdb_5000_movies.csv',
encoding='ISO-8859-1')
clean_TMDB_movies = movies_TMDB_kaggle.drop(columns=['homepage', 'id',
'overview', 'status', 'tagline', 'original_title'])
clean_TMDB_movies.dropna(inplace=True)
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['budget'] != 0]
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['revenue'] != 0]
clean_TMDB_movies['profit'] = clean_TMDB_movies['revenue'] - clean_TMDB_movies[
'budget']
clean_TMDB_movies['percent_profit'] = clean_TMDB_movies['profit'
] / clean_TMDB_movies['budget'] * 100
clean_TMDB_movies['release_date'] = pd.to_datetime(clean_TMDB_movies[
'release_date'])
clean_TMDB_movies['month'], clean_TMDB_movies['day'] = clean_TMDB_movies[
'release_date'].dt.month, clean_TMDB_movies['release_date'].dt.day
cat = list(range(1, 13))
clean_TMDB_movies['month'] = pd.Categorical(clean_TMDB_movies['month'],
ordered=True, categories=cat)
categories = ['very_low', 'low', 'high', 'very_high']
movies_discretized = clean_TMDB_movies
movies_discretized['budget'] = pd.cut(movies_discretized['budget'], [0,
13000000, 30000000, 62192550, 400000000], labels=categories)
movies_discretized['revenue'] = pd.cut(movies_discretized['revenue'], [0,
21458200, 62954020, 187976900, 2887965000], labels=categories)
categories_profit = ['negative', 'low', 'high', 'very_high']
movies_discretized['profit'] = pd.cut(movies_discretized['profit'], [-
165710100, 0, 29314900, 140784100, 2560965000], labels=categories_profit)
movies_discretized['vote_average'] = pd.cut(movies_discretized[
'vote_average'], [0, 6, 6.5, 7, 8.5], labels=categories)
movies_discretized['vote_count'] = pd.cut(movies_discretized['vote_count'],
[0, 440, 1151, 2522, 14000], labels=categories)
movies_discretized['percent_profit'] = pd.cut(movies_discretized[
'percent_profit'], [-100, 0, 108, 436, 6528], labels=categories_profit)
movies_discretized['percent_profit']
categories_weeks = ['week_1', 'week_2', 'week_3', 'week_4']
movies_discretized['week'] = pd.cut(movies_discretized['day'], [0, 8, 15,
22, 32], labels=categories_weeks)
movies_discretized.drop(columns=['day', 'release_date'], inplace=True)
production_company = []
for movie in movies_discretized['production_companies']:
if 'Universal' in movie:
production_company.append('Universal')
elif 'Sony' in movie:
production_company.append('Sony')
elif 'Fox' in movie:
production_company.append('Fox')
elif 'DreamWorks' in movie:
production_company.append('DW')
elif 'MGM' in movie:
production_company.append('MGM')
elif 'Paramount' in movie:
production_company.append('Paramount')
elif 'Disney' in movie:
production_company.append('Disney')
elif 'Warner Bros' in movie:
production_company.append('WB')
else:
production_company.append('None')
movies_discretized['main_production'] = production_company
movies_discretized_count = movies_discretized.groupby(['main_production',
'percent_profit'])['main_production'].count()
movies_discretized_count_df = pd.DataFrame(movies_discretized_count)
movies_discretized_count_df.columns = ['counts']
movies_discretized_count_df['production_company'
] = movies_discretized_count_df.index.get_level_values(0)
movies_discretized_count_df['percent_profit_category'
] = movies_discretized_count_df.index.get_level_values(1)
movies_discretized_count_df = movies_discretized_count_df.reset_index(drop=True
)
production_company_discretized_count_df = movies_discretized_count_df.groupby([
'production_company'])['counts'].sum()
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company']
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'DW'], 82)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Disney'], 116)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Fox'], 298)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'MGM'], 87)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'None'], 1782)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Paramount'], 235)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Sony'], 42)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'Universal'], 282)
movies_discretized_count_df['production_company_count'
] = movies_discretized_count_df['production_company_count'].replace([
'WB'], 269)
movies_discretized_count_df['percent'] = movies_discretized_count_df['counts'
] / movies_discretized_count_df['production_company_count'] * 100
movies_discretized_count_df.drop(['counts', 'production_company_count'],
axis=1, inplace=True)
movies_discretized_count_week = movies_discretized.groupby(['week',
'percent_profit'])['week'].count()
movies_discretized_count_df_week = pd.DataFrame(movies_discretized_count_week)
movies_discretized_count_df_week.columns = ['counts']
movies_discretized_count_df_week['week'
] = movies_discretized_count_df_week.index.get_level_values(0)
movies_discretized_count_df_week['percent_profit_category'
] = movies_discretized_count_df_week.index.get_level_values(1)
movies_discretized_count_df_week = (movies_discretized_count_df_week.
reset_index(drop=True))
sum_discretized_count_df_week = movies_discretized_count_df_week.groupby([
'week'])['counts'].sum()
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week']
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_1'], 783)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_2'], 817)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_3'], 782)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].replace(['week_4'], 811)
movies_discretized_count_df_week['week_count'
] = movies_discretized_count_df_week['week_count'].astype(np.int64)
movies_discretized_count_df_week['percent'] = movies_discretized_count_df_week[
'counts'] / movies_discretized_count_df_week['week_count'] * 100
movies_discretized_count_df_week.drop(['counts', 'week_count'], axis=1,
inplace=True)
movies_IMDb = pd.read_csv(
'C:/Users/lewis/OneDrive/Documents/MovieData/IMDb_movies.csv')
clean_IMDb = movies_IMDb.drop(columns=['imdb_title_id', 'original_title',
'description', 'reviews_from_users', 'reviews_from_critics'])
clean_IMDb.dropna(inplace=True)
IMDb_movies_genre = groupby_count(clean_IMDb, 'genre', 'genre')
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace=True)
revenue_covid = pd.read_csv(
'C:/Users/lewis/OneDrive/Documents/MovieData/revenue_covid_impact.csv')
print(revenue_covid)
AMC_revenue = pd.read_csv('C:/Users/lewis/OneDrive/Documents/MovieData/AMC.csv'
)
print(AMC_revenue.head())
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 9 18:52:17 2021
@author: lewis
"""
import csv
import pandas as pd
import re
import statistics
import matplotlib.pyplot as plt
import numpy as np
from bs4 import BeautifulSoup
from urllib.request import urlopen
#Creating a function that groups by, counts, creates a new column from the index, drops the index and changes the column names
def groupby_count(df, groupby_column, count_column):
new_df = pd.DataFrame(df.groupby(groupby_column)[count_column].count())
new_df.columns = ['count']
new_df[groupby_column] = new_df.index.get_level_values(0)
new_df.reset_index(drop = True, inplace = True)
return(new_df)
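# e.g. groupby_count(df, 'genre', 'genre') yields a frame with columns ['count', 'genre']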
url = 'https://en.wikipedia.org/wiki/Film_series'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
tables = soup.find_all('table')
#Create a function to process the string into a float by using re.sub()
def process_num(num):
return float(re.sub(r'[^\w\s.]','',num))
#test function
num1 = float(re.sub(r'[^\w\s.]','','1,156.30'))
#print(num1)
#Create array to hold the data extracted
gross=[]
year=[]
film=[]
for table in tables:
rows = table.find_all('tr')
for row in rows:
cells = row.find_all('td')
if len(cells) > 1:
Franchise = cells[1]
film.append(Franchise.text.strip())
Gross = cells[6]
gross.append(process_num(Gross.text.strip()))
first = cells[7]
year.append(int(first.text))
# put the data in the pandas dataframe
movie_df= pd.DataFrame({'Gross': gross,
'first': year,
'Franchise': film
})
#print(movie_df)
#print(movie_df.dtypes)
#movies_df_count = movie_df.groupby(["Franchise", "first"])["first"].count()
#print(movies_df_count)
#WIKI_df=movie_df.groupby(["first"])["first"].count()
#print(WIKI_df)
#WIKI_df.plot(kind='bar',x='first',y='count')
#plt.title("Most Movies Release count by Year(Top 68 on WIKI)",fontsize=20)
#TMDB Kaggle Data
movies_TMDB_kaggle= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/tmdb_5000_movies.csv', encoding= 'ISO-8859-1')
#print(len(movies_TMDB_kaggle)) #result 4803 and 20 columns
#print(movies_TMDB_kaggle.isnull().sum()) #tagline and homepage has the most NaN, unnecessary columns
#Clean the dataframe, removing any unnecessary columns
clean_TMDB_movies= movies_TMDB_kaggle.drop(columns=['homepage', 'id', 'overview', 'status', 'tagline', 'original_title'])
#print(clean_TMDB_movies) #result 4803 rows and 14 columns
#print(clean_TMDB_movies.isnull().sum()) # NaNs in the release_date and runtime column
clean_TMDB_movies.dropna(inplace= True)
#print(clean_TMDB_movies.isnull().sum())
#Removing any movie that has a budget of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['budget'] != 0]
#Removing any movie with a revenue of 0
clean_TMDB_movies = clean_TMDB_movies[clean_TMDB_movies['revenue'] != 0]
#To review the profit for each movie, a profit column is created
clean_TMDB_movies['profit'] = clean_TMDB_movies['revenue'] - clean_TMDB_movies['budget']
#Creating a percent profit column in order to compare profits.
clean_TMDB_movies['percent_profit'] = clean_TMDB_movies['profit']/clean_TMDB_movies['budget']*100
#print the top five
#print(clean_TMDB_movies.head())
#checking the data types
#print(clean_TMDB_movies.dtypes)
#change release_date to datetime and split it into month and day columns
clean_TMDB_movies['release_date'] = pd.to_datetime(clean_TMDB_movies['release_date'])
clean_TMDB_movies['month'], clean_TMDB_movies['day'] = clean_TMDB_movies['release_date'].dt.month, clean_TMDB_movies['release_date'].dt.day
#After the new columns were added, build the ordered month categories.
cat = list(range(1,13))
#Changing the month data type from int to ordered category
clean_TMDB_movies['month'] = pd.Categorical(clean_TMDB_movies['month'], ordered = True, categories = cat)
#confirmation
#print(clean_TMDB_movies.month.dtype)
#print(len(clean_TMDB_movies))
#print(clean_TMDB_movies.describe())
#print(clean_TMDB_movies.revenue.describe())
#print(clean_TMDB_movies.profit.describe())
#print(clean_TMDB_movies.vote_count.describe())
#print(clean_TMDB_movies.percent_profit.describe())
#discretize the budget column
categories = ["very_low", "low", "high", "very_high"]
#saving the clean_TMDB df as a discretized df
movies_discretized = clean_TMDB_movies
#creating a budget cutoff using pandas cut function
movies_discretized["budget"] = pd.cut(movies_discretized["budget"], [0, 13000000, 30000000, 62192550, 400000000], labels = categories)
#repeat the step for revenue
#print(movies_discretized.revenue.describe())
movies_discretized["revenue"] = pd.cut(movies_discretized["revenue"], [0, 21458200, 62954020, 187976900, 2887965000], labels = categories)
#profit
categories_profit = ["negative", "low", "high", "very_high"]
movies_discretized["profit"] = pd.cut(movies_discretized["profit"], [-165710100 , 0, 29314900, 140784100, 2560965000], labels = categories_profit)
#print(movies_discretized["profit"].head())
#Vote_average: very_low is below 6, low between 6 and 6.5, high between 6.5 and 7, very_high between 7 and 8.5
movies_discretized["vote_average"] = pd.cut(movies_discretized["vote_average"], [0, 6, 6.5, 7, 8.5], labels = categories)
#print(movies_discretized["vote_average"].head())
#Vote_count
movies_discretized["vote_count"] = pd.cut(movies_discretized["vote_count"], [0, 440, 1151, 2522, 14000], labels = categories)
#print(movies_discretized["vote_count"].head())
#percent_profit
movies_discretized["percent_profit"] = pd.cut(movies_discretized["percent_profit"], [-100, 0, 108, 436, 6528], labels = categories_profit)
movies_discretized["percent_profit"]
#Categorizing days into weeks
#print(movies_discretized.day.describe())
categories_weeks = ["week_1", "week_2", "week_3", "week_4"]
movies_discretized["week"] = pd.cut(movies_discretized["day"], [0, 8, 15, 22, 32], labels = categories_weeks)
#print(movies_discretized["week"].head())
#day and release_date are no longer needed columns
movies_discretized.drop(columns=['day', 'release_date'], inplace = True)
#print(movies_discretized.head())
#Question 1: Do major production companies have an impact on the profit margin?
production_company = []
for movie in movies_discretized['production_companies']:
if "Universal" in movie:
production_company.append("Universal")
elif "Sony" in movie:
production_company.append("Sony")
elif "Fox" in movie:
production_company.append("Fox")
elif "DreamWorks" in movie:
production_company.append("DW")
elif "MGM" in movie:
production_company.append("MGM")
elif "Paramount" in movie:
production_company.append("Paramount")
elif "Disney" in movie:
production_company.append("Disney")
elif "Warner Bros" in movie:
production_company.append("WB")
else:
production_company.append("None")
movies_discretized["main_production"] = production_company
#print(movies_discretized["main_production"].head())
movies_discretized_count = movies_discretized.groupby(["main_production", "percent_profit"])["main_production"].count()
movies_discretized_count_df= pd.DataFrame(movies_discretized_count)
#print(movies_discretized_count_df)
#rename the column from 'main_production' to 'counts'
movies_discretized_count_df.columns = ["counts"]
#print(movies_discretized_count_df.head())
#expose the MultiIndex levels (production company, percent_profit category) as regular columns
movies_discretized_count_df["production_company"]=movies_discretized_count_df.index.get_level_values(0)
movies_discretized_count_df["percent_profit_category"] = movies_discretized_count_df.index.get_level_values(1)
#print(movies_discretized_count_df)
#reset the index; next, compute each production company's total count
movies_discretized_count_df = movies_discretized_count_df.reset_index(drop = True)
#The sum of each production company category.
production_company_discretized_count_df = movies_discretized_count_df.groupby(["production_company"])["counts"].sum()
#print(production_company_discretized_count_df)
#to put each row's company total alongside it, create a production_company_count column that copies the company name, then replace each name with that company's total count
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company"]
#replace each production company name with its total count
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["DW"], 82)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Disney"], 116)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Fox"], 298)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["MGM"], 87)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["None"], 1782)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Paramount"], 235)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Sony"], 42)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["Universal"], 282)
movies_discretized_count_df["production_company_count"] = movies_discretized_count_df["production_company_count"].replace(["WB"], 269)
#print(movies_discretized_count_df)
#percentage
movies_discretized_count_df["percent"] = movies_discretized_count_df["counts"]/movies_discretized_count_df["production_company_count"] *100
#print(movies_discretized_count_df.head())
#drop the counts and production_company_count columns; the percent column supersedes them
movies_discretized_count_df.drop(["counts", "production_company_count"], axis = 1, inplace = True )
#graphing question 1 using Matplotlib
#graph = movies_discretized_count_df.pivot("production_company", "percent_profit_category","percent").plot(kind="bar", color= ['blue', 'green', 'purple', 'red'], title='Profit Margin amongst Production Companies')
#change the x and y axis for graph
#plt.ylabel("Percent Profit")
#plt.xlabel("Production")
#plt.xticks(rotation = 0)
#position the legends underneath the graph; Now the graph looks beautiful
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit Category")
#plt.show()
#Question 2: Does the week of the month in which a film is released have an impact on its profit margin?
movies_discretized_count_week = movies_discretized.groupby(["week", "percent_profit"])["week"].count()
movies_discretized_count_df_week = pd.DataFrame(movies_discretized_count_week)
#Checking the dataframe
#print(movies_discretized_count_df_week)
#changing column that is labeled week to count
movies_discretized_count_df_week.columns = ["counts"]
#total count for the number of % profit for each week
movies_discretized_count_df_week["week"]=movies_discretized_count_df_week.index.get_level_values(0)
movies_discretized_count_df_week["percent_profit_category"] = movies_discretized_count_df_week.index.get_level_values(1)
#print(movies_discretized_count_df_week)
movies_discretized_count_df_week = movies_discretized_count_df_week.reset_index(drop = True) #drop the index
#total count for each week
sum_discretized_count_df_week = movies_discretized_count_df_week.groupby(["week"])["counts"].sum()
#print(sum_discretized_count_df_week) #the sums are centered around 700-800s
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week"]
#replace each week label with its total count
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_1"], 783)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_2"], 817)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_3"], 782)
movies_discretized_count_df_week["week_count"] = movies_discretized_count_df_week["week_count"].replace(["week_4"], 811)
#print(movies_discretized_count_df_week.head())
#pd.cut returned a Categorical; numpy cannot divide category dtype ("Object with dtype category cannot perform the numpy op true_divide"), so cast to int64 first
movies_discretized_count_df_week["week_count"]= movies_discretized_count_df_week["week_count"].astype(np.int64)
#convert into percentage; counts/week_count * 100
movies_discretized_count_df_week["percent"] = movies_discretized_count_df_week["counts"]/movies_discretized_count_df_week["week_count"] *100
#print(movies_discretized_count_df_week.head())
#drop the counts and week_count columns; the percent column supersedes them
movies_discretized_count_df_week.drop(["counts", "week_count"], axis = 1, inplace = True )
#Time to create a visual
#graph_question_2 = movies_discretized_count_df_week.pivot("week", "percent_profit_category", "percent").plot(kind="bar", color = ["blue", "green", "purple", "red"], title = "Impact of Percent Profit by Week")
#plt.ylabel("Percent")
#plt.xlabel("Week")
#plt.xticks(rotation = 0)
#plt.legend( loc = "lower center", bbox_to_anchor = (.5, -.4), ncol = 4, title = "Percent Profit")
#plt.show()
#IMDb Kaggle Data
movies_IMDb= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/IMDb_movies.csv')
clean_IMDb= movies_IMDb.drop(columns=['imdb_title_id','original_title','description', 'reviews_from_users', 'reviews_from_critics'])
#print(clean_IMDb) #85,855 rows and 17 columns
#print(clean_IMDb.isnull().sum())
clean_IMDb.dropna(inplace = True) #drop all the NaNs
#print(clean_IMDb.isnull().sum()) #no more NaNs
#print(len(clean_IMDb)) #6635
#print(clean_IMDb.dtypes)
# QUESTION 3: How does budget impact vote average?
#plt.plot(clean_IMDb.budget, clean_IMDb.avg_vote, 'o')
#plt.title('How does Budget Impact Vote Average?')
#plt.xlabel('Budget')
#plt.ylabel('Vote Average')
#plt.show()
#print(clean_IMDb['budget'].head())
#print the top five
#print(clean_IMDb.head())
#Using the groupby_count function that takes the following arguments (df, groupby_column, count_column)
IMDb_movies_genre = groupby_count(clean_IMDb, 'genre', 'genre')
#Sorting the df, so the bar graph will be in descending order
IMDb_movies_genre.sort_values(['count'], ascending=[False], inplace = True)
#Statista data: movie theatre revenue with post-COVID forecasts to 2025, loaded into a DataFrame
revenue_covid= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/revenue_covid_impact.csv')
print(revenue_covid)
AMC_revenue= pd.read_csv(r'C:/Users/lewis/OneDrive/Documents/MovieData/AMC.csv')
#print(AMC_revenue)
#print(AMC_revenue.info())
print(AMC_revenue.head())
#During 2020, AMC Theatres reported annual revenues of 1.24 billion U.S. dollars, a dramatic decrease from previous years as a consequence of the COVID-19 pandemic.
plt.plot(AMC_revenue.Year, AMC_revenue.Money, 'o')
plt.title('AMC revenue over 15 years')
plt.xlabel('Year')
plt.ylabel('Revenue')
plt.show()
#Global box office revenue coronavirus impact 2020-2025
#revenue_covid.plot(x="Year", y=["Originalforecast", "Marchrevision", "Julyrevision"], kind="bar")
#plt.show()
|
[
2,
3,
4,
5,
6
] |
2,185 |
fb64003c1acbddcbe952a17edcbf293a54ef28ae
|
<mask token>
class InowasFlopyCalculationAdapter:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, version, data, uuid):
self._mf_data = data.get('mf')
self._mt_data = data.get('mt')
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get('write_input'):
self.write_input_model(self._mf)
if self._mf_data.get('run_model'):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get('write_input'):
self.write_input_model(self._mt)
if self._mt_data.get('run_model'):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data['packages']:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
<mask token>
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
<mask token>
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
<mask token>
|
<mask token>
class InowasFlopyCalculationAdapter:
<mask token>
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',
'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']
mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',
'phc', 'rct', 'sft', 'tob', 'uzt']
def __init__(self, version, data, uuid):
self._mf_data = data.get('mf')
self._mt_data = data.get('mt')
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get('write_input'):
self.write_input_model(self._mf)
if self._mf_data.get('run_model'):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get('write_input'):
self.write_input_model(self._mt)
if self._mt_data.get('run_model'):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data['packages']:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
|
<mask token>
class InowasFlopyCalculationAdapter:
"""The Flopy Class"""
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',
'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']
mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',
'phc', 'rct', 'sft', 'tob', 'uzt']
def __init__(self, version, data, uuid):
self._mf_data = data.get('mf')
self._mt_data = data.get('mt')
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get('write_input'):
self.write_input_model(self._mf)
if self._mf_data.get('run_model'):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get('write_input'):
self.write_input_model(self._mt)
if self._mt_data.get('run_model'):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data['packages']:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
|
<mask token>
from .BasAdapter import BasAdapter
from .ChdAdapter import ChdAdapter
from .DisAdapter import DisAdapter
from .GhbAdapter import GhbAdapter
from .LpfAdapter import LpfAdapter
from .MfAdapter import MfAdapter
from .NwtAdapter import NwtAdapter
from .OcAdapter import OcAdapter
from .PcgAdapter import PcgAdapter
from .RchAdapter import RchAdapter
from .RivAdapter import RivAdapter
from .ReadBudget import ReadBudget
from .ReadDrawdown import ReadDrawdown
from .ReadHead import ReadHead
from .UpwAdapter import UpwAdapter
from .WelAdapter import WelAdapter
from .LmtAdapter import LmtAdapter
from .MtAdapter import MtAdapter
from .AdvAdapter import AdvAdapter
from .BtnAdapter import BtnAdapter
from .DspAdapter import DspAdapter
from .GcgAdapter import GcgAdapter
from .LktAdapter import LktAdapter
from .PhcAdapter import PhcAdapter
from .RctAdapter import RctAdapter
from .SftAdapter import SftAdapter
from .SsmAdapter import SsmAdapter
from .TobAdapter import TobAdapter
from .UztAdapter import UztAdapter
class InowasFlopyCalculationAdapter:
"""The Flopy Class"""
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = ['mf', 'dis', 'bas', 'bas6', 'riv', 'wel', 'rch',
'chd', 'ghb', 'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6']
mt_package_order = ['mt', 'btn', 'adv', 'dsp', 'gcg', 'ssm', 'lkt',
'phc', 'rct', 'sft', 'tob', 'uzt']
def __init__(self, version, data, uuid):
self._mf_data = data.get('mf')
self._mt_data = data.get('mt')
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get('write_input'):
self.write_input_model(self._mf)
if self._mf_data.get('run_model'):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get('write_input'):
self.write_input_model(self._mt)
if self._mt_data.get('run_model'):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data['packages']:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
|
"""
This module is an intermediate layer between flopy version 3.2
and the inowas-modflow-configuration format.
Author: Ralf Junghanns
EMail: [email protected]
"""
from .BasAdapter import BasAdapter
from .ChdAdapter import ChdAdapter
from .DisAdapter import DisAdapter
from .GhbAdapter import GhbAdapter
from .LpfAdapter import LpfAdapter
from .MfAdapter import MfAdapter
from .NwtAdapter import NwtAdapter
from .OcAdapter import OcAdapter
from .PcgAdapter import PcgAdapter
from .RchAdapter import RchAdapter
from .RivAdapter import RivAdapter
from .ReadBudget import ReadBudget
from .ReadDrawdown import ReadDrawdown
from .ReadHead import ReadHead
from .UpwAdapter import UpwAdapter
from .WelAdapter import WelAdapter
from .LmtAdapter import LmtAdapter
from .MtAdapter import MtAdapter
from .AdvAdapter import AdvAdapter
from .BtnAdapter import BtnAdapter
from .DspAdapter import DspAdapter
from .GcgAdapter import GcgAdapter
from .LktAdapter import LktAdapter
from .PhcAdapter import PhcAdapter
from .RctAdapter import RctAdapter
from .SftAdapter import SftAdapter
from .SsmAdapter import SsmAdapter
from .TobAdapter import TobAdapter
from .UztAdapter import UztAdapter
class InowasFlopyCalculationAdapter:
"""The Flopy Class"""
_version = None
_uuid = None
_mf = None
_mt = None
_report = ''
mf_package_order = [
'mf', 'dis', 'bas', 'bas6',
'riv', 'wel', 'rch', 'chd', 'ghb',
'lpf', 'upw', 'pcg', 'nwt', 'oc', 'lmt', 'lmt6'
]
mt_package_order = [
"mt", "btn", "adv", "dsp", "gcg", "ssm", "lkt",
"phc", "rct", "sft", "tob", "uzt"
]
def __init__(self, version, data, uuid):
self._mf_data = data.get("mf")
self._mt_data = data.get("mt")
self._version = version
self._uuid = uuid
if self._mf_data is not None:
package_content = self.read_packages(self._mf_data)
self.create_model(self.mf_package_order, package_content)
if self._mf_data.get("write_input"):
self.write_input_model(self._mf)
if self._mf_data.get("run_model"):
self._report += self.run_model(self._mf)
if self._mt_data is not None:
package_content = self.read_packages(self._mt_data)
self.create_model(self.mt_package_order, package_content)
if self._mt_data.get("write_input"):
self.write_input_model(self._mt)
if self._mt_data.get("run_model"):
self._report += self.run_model(self._mt)
@staticmethod
def read_packages(data):
package_content = {}
for package in data["packages"]:
print('Read Flopy Package: %s' % package)
package_content[package.lower()] = data[package]
return package_content
def create_model(self, package_order, package_content):
for package in package_order:
if package in package_content:
print('Create Flopy Package: %s' % package)
self.create_package(package, package_content[package])
@staticmethod
def write_input_model(model):
print('Write %s input files' % model)
model.write_input()
@staticmethod
def run_model(model):
print('Run the %s model' % model)
print(model.namefile)
print(model.exe_name)
success, report = model.run_model(report=True, silent=True)
return ' \n'.join(str(e) for e in report + [success])
def check_model(self):
if self._mf is not None:
self._mf.check()
if self._mt is not None:
self._mt.check()
def create_package(self, name, content):
        # Modflow packages
if name == 'mf':
self._mf = MfAdapter(content).get_package()
if name == 'dis':
DisAdapter(content).get_package(self._mf)
if name == 'bas' or name == 'bas6':
BasAdapter(content).get_package(self._mf)
if name == 'lpf':
LpfAdapter(content).get_package(self._mf)
if name == 'upw':
UpwAdapter(content).get_package(self._mf)
if name == 'pcg':
PcgAdapter(content).get_package(self._mf)
if name == 'nwt':
NwtAdapter(content).get_package(self._mf)
if name == 'oc':
OcAdapter(content).get_package(self._mf)
if name == 'riv':
RivAdapter(content).get_package(self._mf)
if name == 'wel':
WelAdapter(content).get_package(self._mf)
if name == 'rch':
RchAdapter(content).get_package(self._mf)
if name == 'chd':
ChdAdapter(content).get_package(self._mf)
if name == 'ghb':
GhbAdapter(content).get_package(self._mf)
if name == 'lmt':
LmtAdapter(content).get_package(self._mf)
# MT3D packages
if name == 'mt':
self._mt = MtAdapter(content).get_package(self._mf)
if name == 'adv':
AdvAdapter(content).get_package(self._mt)
if name == 'btn':
BtnAdapter(content).get_package(self._mt)
if name == 'dsp':
DspAdapter(content).get_package(self._mt)
if name == 'gcg':
GcgAdapter(content).get_package(self._mt)
if name == 'lkt':
LktAdapter(content).get_package(self._mt)
if name == 'phc':
PhcAdapter(content).get_package(self._mt)
if name == 'rct':
RctAdapter(content).get_package(self._mt)
if name == 'sft':
SftAdapter(content).get_package(self._mt)
if name == 'ssm':
SsmAdapter(content).get_package(self._mt)
if name == 'tob':
TobAdapter(content).get_package(self._mt)
if name == 'uzt':
UztAdapter(content).get_package(self._mt)
def response(self):
key = 'mf'
if 'MF' in self._mf_data:
key = 'MF'
heads = ReadHead(self._mf_data[key]['model_ws'])
drawdowns = ReadDrawdown(self._mf_data[key]['model_ws'])
budgets = ReadBudget(self._mf_data[key]['model_ws'])
response = {}
response['heads'] = heads.read_times()
response['drawdowns'] = drawdowns.read_times()
response['budgets'] = budgets.read_times()
response['number_of_layers'] = heads.read_number_of_layers()
return response
def response_message(self):
return self._report
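
# Quick illustration of read_packages with made-up data (a sketch; real
# payloads follow the inowas-modflow-configuration format):
# sample = {'packages': ['MF', 'DIS'],
#           'MF': {'modelname': 'demo'},
#           'DIS': {'nlay': 1}}
# InowasFlopyCalculationAdapter.read_packages(sample)
# -> {'mf': {'modelname': 'demo'}, 'dis': {'nlay': 1}}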
|
[
7,
11,
12,
13,
14
] |
2,186 |
4cc138016cb1f82e12c76c185be19188d3e38bf9
|
<mask token>
|
<mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
|
<mask token>
class Migration(migrations.Migration):
dependencies = [('api', '0006_order_date')]
operations = [migrations.RemoveField(model_name='order', name='product'
), migrations.AddField(model_name='order', name='product', field=
models.ManyToManyField(to='api.Product')), migrations.AlterField(
model_name='order', name='status', field=models.TextField(default=
'неплачено', max_length=50))]
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('api', '0006_order_date')]
operations = [migrations.RemoveField(model_name='order', name='product'
), migrations.AddField(model_name='order', name='product', field=
models.ManyToManyField(to='api.Product')), migrations.AlterField(
model_name='order', name='status', field=models.TextField(default=
'неплачено', max_length=50))]
|
# Generated by Django 3.0.5 on 2020-04-25 12:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_order_date'),
]
operations = [
migrations.RemoveField(
model_name='order',
name='product',
),
migrations.AddField(
model_name='order',
name='product',
field=models.ManyToManyField(to='api.Product'),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.TextField(default='неплачено', max_length=50),
),
]
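
# Note: RemoveField followed by AddField drops any existing order->product
# links. If that data mattered, a three-step approach with a data migration
# would preserve it (a sketch with hypothetical names):
#   1) AddField('order', 'product_tmp', models.ManyToManyField(to='api.Product'))
#   2) RunPython(forward_copy)  # copy each order's old FK into the new M2M
#   3) RemoveField('order', 'product'); RenameField('order', 'product_tmp', 'product')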
|
[
0,
1,
2,
3,
4
] |
2,187 |
77b7a0ae115aa063512ea7d6e91811470a4cf9d0
|
str = 'Hello world'
print("string length : %d" % (len(str)))
print("string length 444:", len(str))
print(str)
print(str[0])
print(str[1:5])
print(str[:len(str)])
print(str[1:] * 3)
print(str[1:] * 5)
print('string concatenation')
print("Hello" + "world")
#print ("python : str.join Test")
str1 = "-"
print(str1.join(str))
list = [1, 2, 3, 4]
for a in str:
    print("current letter:", a)
n = 0
for s in list:
    print("list[%d] :%d" % (n, s))
    n += 1  # Python has no ++ operator; increment explicitly
| null | null | null | null |
[
0
] |
2,188 |
cdb07241e08f8ac85a427c5b2bc3effca3917c85
|
<mask token>
def main():
print('Output')
<mask token>
|
<mask token>
def main():
print('Output')
<mask token>
if __name__ == '__main__':
main()
<mask token>
print('Run time: {}'.format(end - start))
|
<mask token>
def main():
print('Output')
start = time.time()
if __name__ == '__main__':
main()
end = time.time()
print('Run time: {}'.format(end - start))
|
<mask token>
import time
def main():
print('Output')
start = time.time()
if __name__ == '__main__':
main()
end = time.time()
print('Run time: {}'.format(end - start))
|
# -*- coding: utf-8 -*-
"""
Project Euler - Problem XX
...
"""
# Imports
import time
# Global variables
# Lamda functions
# Functions
# Main functions
def main():
print('Output')
# Execute code
start = time.time()
if __name__ == "__main__":
main()
end = time.time()
print('Run time: {}'.format(end - start))
|
[
1,
2,
3,
4,
5
] |
2,189 |
b1b478965ad939a98478b19b4a94f3250167e25a
|
<mask token>
def show_examples(images_base, labels_base, index_list, output_path):
results = []
for index in tqdm(index_list):
img = cv2.imread(os.path.join(images_base, index + '.jpg'))
lab = np.array(Image.open(os.path.join(labels_base, index + '.png')
).convert('P'))
results += np.unique(lab).tolist()
return list(set(results))
def get_info(label_dir):
label_path = glob('%s/*' % label_dir)
total_area = []
total_number = []
for label_name in tqdm(label_path):
lab = np.array(Image.open(label_name).convert('P'))
masks = [(lab == v) for v in range(21)]
zz = np.mean(masks, axis=(1, 2))
total_area.append(zz.copy())
zz[zz > 0] = 1
total_number.append(zz)
print(np.sum(total_number, axis=0))
print(np.sum(total_area, axis=0))
<mask token>
|
<mask token>
def get_index(path):
"""
get the length of index for voc2012 dataset.
path: the index of train,val or test path
"""
with open(path, 'r') as f:
zz = f.readlines()
return [index.split('\n')[0] for index in zz]
def show_examples(images_base, labels_base, index_list, output_path):
results = []
for index in tqdm(index_list):
img = cv2.imread(os.path.join(images_base, index + '.jpg'))
lab = np.array(Image.open(os.path.join(labels_base, index + '.png')
).convert('P'))
results += np.unique(lab).tolist()
return list(set(results))
def get_info(label_dir):
label_path = glob('%s/*' % label_dir)
total_area = []
total_number = []
for label_name in tqdm(label_path):
lab = np.array(Image.open(label_name).convert('P'))
masks = [(lab == v) for v in range(21)]
zz = np.mean(masks, axis=(1, 2))
total_area.append(zz.copy())
zz[zz > 0] = 1
total_number.append(zz)
print(np.sum(total_number, axis=0))
print(np.sum(total_area, axis=0))
<mask token>
|
<mask token>
np.set_printoptions(precision=3, suppress=True)
def get_index(path):
"""
get the length of index for voc2012 dataset.
path: the index of train,val or test path
"""
with open(path, 'r') as f:
zz = f.readlines()
return [index.split('\n')[0] for index in zz]
def show_examples(images_base, labels_base, index_list, output_path):
results = []
for index in tqdm(index_list):
img = cv2.imread(os.path.join(images_base, index + '.jpg'))
lab = np.array(Image.open(os.path.join(labels_base, index + '.png')
).convert('P'))
results += np.unique(lab).tolist()
return list(set(results))
def get_info(label_dir):
label_path = glob('%s/*' % label_dir)
total_area = []
total_number = []
for label_name in tqdm(label_path):
lab = np.array(Image.open(label_name).convert('P'))
masks = [(lab == v) for v in range(21)]
zz = np.mean(masks, axis=(1, 2))
total_area.append(zz.copy())
zz[zz > 0] = 1
total_number.append(zz)
print(np.sum(total_number, axis=0))
print(np.sum(total_area, axis=0))
if __name__ == '__main__':
import shutil
output_dir = 'visual_results'
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
index_dir = '/data/VOCdevkit/VOC2012/ImageSets/Segmentation'
imge_dir = '/data/VOCdevkit/VOC2012/JPEGImages'
label_dir = '/data/VOCdevkit/VOC2012/SegmentationClass'
print('train_index:', len(get_index(os.path.join(index_dir, 'train.txt'))))
print('val_index:', len(get_index(os.path.join(index_dir, 'val.txt'))))
print('test_index:', len(get_index(os.path.join(index_dir, 'test.txt'))))
train_results = show_examples(imge_dir, label_dir, get_index(os.path.
join(index_dir, 'train.txt')), output_dir)
train_results.sort()
print('train label:', len(train_results), train_results)
get_info(label_dir)
<mask token>
|
from glob import glob
from PIL import Image
import numpy as np
from tqdm import tqdm
import cv2
import os
import matplotlib.pyplot as plt
np.set_printoptions(precision=3, suppress=True)
def get_index(path):
"""
get the length of index for voc2012 dataset.
path: the index of train,val or test path
"""
with open(path, 'r') as f:
zz = f.readlines()
return [index.split('\n')[0] for index in zz]
def show_examples(images_base, labels_base, index_list, output_path):
results = []
for index in tqdm(index_list):
img = cv2.imread(os.path.join(images_base, index + '.jpg'))
lab = np.array(Image.open(os.path.join(labels_base, index + '.png')
).convert('P'))
results += np.unique(lab).tolist()
return list(set(results))
def get_info(label_dir):
label_path = glob('%s/*' % label_dir)
total_area = []
total_number = []
for label_name in tqdm(label_path):
lab = np.array(Image.open(label_name).convert('P'))
masks = [(lab == v) for v in range(21)]
zz = np.mean(masks, axis=(1, 2))
total_area.append(zz.copy())
zz[zz > 0] = 1
total_number.append(zz)
print(np.sum(total_number, axis=0))
print(np.sum(total_area, axis=0))
if __name__ == '__main__':
import shutil
output_dir = 'visual_results'
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
index_dir = '/data/VOCdevkit/VOC2012/ImageSets/Segmentation'
imge_dir = '/data/VOCdevkit/VOC2012/JPEGImages'
label_dir = '/data/VOCdevkit/VOC2012/SegmentationClass'
print('train_index:', len(get_index(os.path.join(index_dir, 'train.txt'))))
print('val_index:', len(get_index(os.path.join(index_dir, 'val.txt'))))
print('test_index:', len(get_index(os.path.join(index_dir, 'test.txt'))))
train_results = show_examples(imge_dir, label_dir, get_index(os.path.
join(index_dir, 'train.txt')), output_dir)
train_results.sort()
print('train label:', len(train_results), train_results)
get_info(label_dir)
<mask token>
|
from glob import glob
from PIL import Image
import numpy as np
from tqdm import tqdm
import cv2
import os
import matplotlib.pyplot as plt
np.set_printoptions(precision=3, suppress=True)
def get_index(path):
"""
get the length of index for voc2012 dataset.
path: the index of train,val or test path
"""
with open(path,'r') as f:
zz = f.readlines()
return [index.split("\n")[0] for index in zz]
def show_examples(images_base, labels_base, index_list, output_path):
results= []
for index in tqdm(index_list):
img = cv2.imread(os.path.join(images_base, index+".jpg"))
# lab = cv2.imread(os.path.join(labels_base, index+".png"), 0)
lab = np.array(Image.open(os.path.join(labels_base, index+".png")).convert('P'))
results+= np.unique(lab).tolist()
#
# plt.figure(figsize=(4,2))
# plt.subplot(121)
# plt.imshow(img)
# plt.title("images")
# plt.subplot(122)
# plt.imshow(lab)
# plt.title('label')
# plt.tight_layout()
# plt.savefig("%s/visual_%s.png"%(output_path, index), dpi=300)
# plt.show()
return list(set(results))
def get_info(label_dir):
label_path = glob("%s/*" % label_dir)
total_area = []
total_number = []
for label_name in tqdm(label_path):
lab = np.array(Image.open(label_name).convert('P'))
# print(lab.shape)
masks = [(lab == v) for v in range(21)]
# get each class area of images
zz = np.mean(masks, axis =(1, 2))
total_area.append(zz.copy())
# get exist class of images
zz[zz > 0] = 1
total_number.append(zz)
print(np.sum(total_number, axis=0))
print(np.sum(total_area, axis=0))
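
# e.g. for a 2x2 label [[0, 0], [0, 5]]: the class-0 mask covers 3 of 4
# pixels, so zz[0] == 0.75 and zz[5] == 0.25; every other class is 0.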
if __name__=="__main__":
import shutil
output_dir = "visual_results"
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
index_dir = '/data/VOCdevkit/VOC2012/ImageSets/Segmentation'
imge_dir = "/data/VOCdevkit/VOC2012/JPEGImages"
label_dir = "/data/VOCdevkit/VOC2012/SegmentationClass"
print("train_index:", len(get_index( os.path.join(index_dir, "train.txt") ) ) ) # 1464
print("val_index:", len( get_index( os.path.join(index_dir, "val.txt") ) ) ) # 1449
print("test_index:", len( get_index( os.path.join(index_dir, "test.txt") ) ) ) #1456
train_results= show_examples(imge_dir, label_dir, get_index(os.path.join(index_dir, "train.txt")), output_dir)
train_results.sort()
print("train label:", len(train_results), train_results)
get_info(label_dir)
"""
train label: 20 [0, 14, 19, 33, 37, 38, 52, 57, 72, 75, 89, 94, 108, 112, 113, 128, 132, 147, 150, 220]
number of each class:
[2903. 178. 144. 208. 150. 183. 152. 255. 250. 271. 135. 157. 249. 147. 157. 888. 167. 120. 183. 167. 157.]
are of each class:
[2019.413 21.703 8.608 23.93 16.14 19.298 49.044 40.491
68.606 27.83 28.275 33.941 51.712 27.909 30.196 139.84
16.282 22.923 39.572 44.975 22.053]
"""
|
[
2,
3,
4,
5,
6
] |
2,190 |
11a31d3276201105ca7485fa4e4eb711012accd5
|
<mask token>
|
<mask token>
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
<mask token>
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file saved')
|
<mask token>
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
newframe = DataFrame()
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file saved')
|
import pandas as pd
from pandas import DataFrame
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
newframe = DataFrame()
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file saved')
|
import pandas as pd
from pandas import DataFrame
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
# chikenList = ['pelicana']
newframe = DataFrame()
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
# print(myframe.head())
# print('-'*30)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file saved')
|
[
0,
1,
2,
3,
4
] |
2,191 |
78037d936ee5f9b31bf00263885fbec225a4f8f2
|
<mask token>
|
<mask token>
if n % 10 == 1 and (n < 11 or n > 20):
print(n, 'korova')
elif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):
print(n, 'korovy')
else:
print(n, 'korov')
|
n = int(input())
if n % 10 == 1 and (n < 11 or n > 20):
print(n, 'korova')
elif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):
print(n, 'korovy')
else:
print(n, 'korov')
|
n = int(input())
if n % 10 == 1 and (n < 11 or n > 20):
print(n, "korova")
elif n % 10 > 1 and n % 10 < 5 and (n < 11 or n > 20):
print(n, "korovy")
else:
print(n, "korov")
| null |
[
0,
1,
2,
3
] |
2,192 |
a0cce8d48f929dd63ba809a1e9bf02b172e8bc1b
|
<mask token>
|
<mask token>
class Carafe(object):
<mask token>
|
<mask token>
class Carafe(object):
def __init__(self):
self.level = CarafeLevel()
self.temp = CarafeTemp()
|
from barista.sensor import CarafeLevel, CarafeTemp
class Carafe(object):
def __init__(self):
self.level = CarafeLevel()
self.temp = CarafeTemp()
|
from barista.sensor import CarafeLevel, CarafeTemp
class Carafe(object):
def __init__(self):
self.level = CarafeLevel()
self.temp = CarafeTemp()
# TODO add callback for when the temperature or level are too low.
|
[
0,
1,
2,
3,
4
] |
2,193 |
f0444676d28be27ad2f0f7cdaa58a96b7facc546
|
# -*- coding: utf-8 -*-
from optparse import make_option
from django.core.management.base import BaseCommand, LabelCommand, CommandError
from open_coesione import utils
import sys
import logging
import csv
import os
class Command(LabelCommand):
"""
Task to extract data related to a sample of all projects.
The sample of projects can be extracted through:
head -n 1 progetti_YYYYMMDD.csv > progetti_sample.csv
tail -n +2 progetti_YYYYMMDD.csv | shuf -n 10 | sort >> progetti_sample.csv
"""
args = "<filename>"
help = "Produces a csv file of rows related to projects' sample."
label = 'filename'
option_list = BaseCommand.option_list + (
make_option('--sample',
dest='proj_sample_file',
default='progetti_sample.csv',
help='Select projects sample csv file'),
make_option('--data-root',
dest='data_root',
default='dati/dataset_latest/',
help='Data root path, where csv files are to be found'),
make_option('--type',
dest='type',
default='loc',
help='Type of related data: loc|rec|pay'),
make_option('--encoding',
dest='encoding',
default='latin1',
help='set character encoding of input (and output) csv files')
)
proj_sample_file = ''
sorted_csv_file = ''
data_root = ''
encoding = ''
logger = logging.getLogger('csvimport')
proj_reader = None
csv.register_dialect('opencoesione', delimiter=';', quoting=csv.QUOTE_ALL)
def handle(self, *labels, **options):
        if len(labels) != 1:
raise CommandError('Enter just one %s.' % self.label)
self.data_root = options['data_root']
self.sorted_csv_file = os.path.join(self.data_root, labels[0])
self.proj_sample_file = os.path.join(self.data_root, options['proj_sample_file'])
self.encoding = options['encoding']
# open sample progetto csv reader
try:
self.proj_reader = utils.UnicodeDictReader(
open(self.proj_sample_file, 'r'),
dialect='opencoesione',
encoding=self.encoding
)
except IOError:
self.logger.error("It was impossible to open file %s" % self.proj_sample_file)
exit(1)
except csv.Error, e:
self.logger.error("CSV error while reading %s: %s" % (self.proj_sample_file, e.message))
exit(1)
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
if options['type'] == 'loc':
# to produce the full, sorted localizzazioni file
# head -n 1 localizzazioni_20120630.csv > localizzazioni_sorted.csv
# tail -n +2 localizzazioni_20120630.csv | sort >> localizzazioni_sorted.csv
headers = [
"COD_LOCALE_PROGETTO",
"COD_REGIONE","DEN_REGIONE",
"COD_PROVINCIA","DEN_PROVINCIA",
"COD_COMUNE","DEN_COMUNE",
"INDIRIZZO_PROG","CAP_PROG",
"DPS_TERRITORIO_PROG","DPS_FLAG_CAP_PROG"
]
elif options['type'] == 'rec':
# to produce the full, sorted soggetti file
# head -n 1 soggetti_20120630.csv > soggetti_sorted.csv
# tail -n +2 soggetti_20120630.csv | sort >> soggetti_sorted.csv
headers = [
"COD_LOCALE_PROGETTO",
"SOGG_COD_RUOLO","SOGG_DESCR_RUOLO","SOGG_PROGR_RUOLO",
"DPS_CODICE_FISCALE_SOGG","DPS_DENOMINAZIONE_SOGG",
"COD_FORMA_GIURIDICA_SOGG","DESCR_FORMA_GIURIDICA_SOGG",
"COD_COMUNE_SEDE_SOGG","INDIRIZZO_SOGG","CAP_SOGG",
"COD_ATECO_SOGG", "DESCRIZIONE_ATECO_SOGG"
]
elif options['type'] == 'pay':
headers = [
"COD_LOCALE_PROGETTO",
"DATA_AGGIORNAMENTO",
"TOT_PAGAMENTI"
]
else:
raise CommandError("Wrong type %s. Select between loc and rec." % options['type'])
# open sorted csv file from where to extract record related to progetti_sample
csv_file = os.path.join(self.data_root, labels[0])
self.logger.info("Inizio ricerca in %s" % csv_file)
try:
reader = utils.UnicodeDictReader(
open(csv_file, 'r'),
dialect='opencoesione',
encoding=self.encoding)
except IOError:
self.logger.error("It was impossible to open file %s" % csv_file)
exit(1)
except csv.Error, e:
self.logger.error("CSV error while reading %s: %s" % (csv_file, e.message))
        # loop over progetto_sample and advance through the sorted related file in
        # lockstep, fetching related records; this is O(n) and drastically reduces extraction time
writer = None
for proj_row in self.proj_reader:
proj_codice_locale = proj_row['COD_LOCALE_PROGETTO']
loc = reader.next()
if writer is None:
writer = utils.UnicodeDictWriter(sys.stdout, headers, dialect='opencoesione', encoding=self.encoding)
while loc['COD_LOCALE_PROGETTO'] < proj_codice_locale:
loc = reader.next()
writer.writerow(loc)
loc = reader.next()
while loc['COD_LOCALE_PROGETTO'] == proj_codice_locale:
writer.writerow(loc)
loc = reader.next()
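
# The loop above, stripped to its essentials, is a sorted-merge join. A
# standalone sketch over plain dicts (both inputs must be sorted on the same
# key; unlike the loop above it also handles running past the end of the file):
def merge_extract(sample_keys, rows, key):
    out = []
    it = iter(rows)
    row = next(it, None)
    for k in sample_keys:
        while row is not None and row[key] < k:
            row = next(it, None)
        while row is not None and row[key] == k:
            out.append(row)
            row = next(it, None)
    return out

# merge_extract(['a', 'c'], [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}], 'id')
# -> [{'id': 'a'}, {'id': 'c'}]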
| null | null | null | null |
[
0
] |
2,194 |
b218f5e401510f844006cb6079737b54aa86827b
|
<mask token>
|
<mask token>
def main():
graphics = BreakoutGraphics()
lives = NUM_LIVES
graphics.window.add(graphics.scoreboard, 0, graphics.window_height)
while True:
pause(FRAME_RATE)
if graphics.ball_fall_down():
lives -= 1
if lives > 0:
graphics.reset_ball()
else:
graphics.game_over()
break
if graphics.you_win():
break
vx = graphics.getx()
vy = graphics.gety()
graphics.ball.move(vx, vy)
graphics.boundary()
graphics.collision()
if __name__ == '__main__':
main()
|
<mask token>
FRAME_RATE = 1000 / 120
NUM_LIVES = 3
def main():
graphics = BreakoutGraphics()
lives = NUM_LIVES
graphics.window.add(graphics.scoreboard, 0, graphics.window_height)
while True:
pause(FRAME_RATE)
if graphics.ball_fall_down():
lives -= 1
if lives > 0:
graphics.reset_ball()
else:
graphics.game_over()
break
if graphics.you_win():
break
vx = graphics.getx()
vy = graphics.gety()
graphics.ball.move(vx, vy)
graphics.boundary()
graphics.collision()
if __name__ == '__main__':
main()
|
<mask token>
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
FRAME_RATE = 1000 / 120
NUM_LIVES = 3
def main():
graphics = BreakoutGraphics()
lives = NUM_LIVES
graphics.window.add(graphics.scoreboard, 0, graphics.window_height)
while True:
pause(FRAME_RATE)
if graphics.ball_fall_down():
lives -= 1
if lives > 0:
graphics.reset_ball()
else:
graphics.game_over()
break
if graphics.you_win():
break
vx = graphics.getx()
vy = graphics.gety()
graphics.ball.move(vx, vy)
graphics.boundary()
graphics.collision()
if __name__ == '__main__':
main()
|
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
YOUR DESCRIPTION HERE
"""
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
FRAME_RATE = 1000 / 120 # 120 frames per second.
NUM_LIVES = 3
def main():
graphics = BreakoutGraphics()
    lives = NUM_LIVES  # remaining lives
    graphics.window.add(graphics.scoreboard, 0, graphics.window_height)  # scoreboard
# Add animation loop here!
while True:
pause(FRAME_RATE)
if graphics.ball_fall_down():
lives -= 1
if lives > 0:
graphics.reset_ball()
else:
graphics.game_over()
break
if graphics.you_win():
break
vx = graphics.getx()
vy = graphics.gety()
graphics.ball.move(vx, vy)
graphics.boundary()
graphics.collision()
if __name__ == '__main__':
main()
|
[
0,
2,
3,
4,
5
] |
2,195 |
53c874fbe14031c323f83db58f17990f4e60bc58
|
<mask token>
class BilanComptes(object):
<mask token>
<mask token>
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
        generates the report's data rows
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidation class for the reports' data
        :return: the report's data rows
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda
x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.
mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'],
client['nature'], id_compte, num_compte, compte[
'intitule'], compte['type'], compte['t3'], Outils.
format_2_dec(compte['s-mat']), Outils.format_2_dec(
compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' +
categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
|
<mask token>
class BilanComptes(object):
<mask token>
@staticmethod
def bilan(dossier_destination, subedition, subgeneraux, lignes):
"""
        creates the report
        :param dossier_destination: an instance of the dossier.DossierDestination class
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param lignes: the report's data rows
"""
nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general
) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'
with dossier_destination.writer(nom) as fichier_writer:
ligne = ['année', 'mois', 'code client', 'code client sap',
'abrév. labo', 'nom labo', 'type client', 'nature client',
'id-compte', 'numéro compte', 'intitulé compte',
'code type compte', 'code type subside', 'Subsides MAj',
'Subsides MOj']
for categorie in subgeneraux.codes_d3():
ligne.append('Subsides ' + categorie + 'j')
ligne += ['total Subsides']
fichier_writer.writerow(ligne)
for ligne in lignes:
fichier_writer.writerow(ligne)
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
        generates the report's data rows
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidation class for the reports' data
        :return: the report's data rows
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda
x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.
mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'],
client['nature'], id_compte, num_compte, compte[
'intitule'], compte['type'], compte['t3'], Outils.
format_2_dec(compte['s-mat']), Outils.format_2_dec(
compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' +
categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
|
<mask token>
class BilanComptes(object):
"""
    Class for creating the accounts report
"""
@staticmethod
def bilan(dossier_destination, subedition, subgeneraux, lignes):
"""
        creation of the report
        :param dossier_destination: an instance of the dossier.DossierDestination class
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param lignes: data rows of the report
"""
nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general
) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'
with dossier_destination.writer(nom) as fichier_writer:
ligne = ['année', 'mois', 'code client', 'code client sap',
'abrév. labo', 'nom labo', 'type client', 'nature client',
'id-compte', 'numéro compte', 'intitulé compte',
'code type compte', 'code type subside', 'Subsides MAj',
'Subsides MOj']
for categorie in subgeneraux.codes_d3():
ligne.append('Subsides ' + categorie + 'j')
ligne += ['total Subsides']
fichier_writer.writerow(ligne)
for ligne in lignes:
fichier_writer.writerow(ligne)
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
        generate the report's data rows
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidation class for the report data
        :return: data rows of the report
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda
x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.
mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'],
client['nature'], id_compte, num_compte, compte[
'intitule'], compte['type'], compte['t3'], Outils.
format_2_dec(compte['s-mat']), Outils.format_2_dec(
compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' +
categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
|
from outils import Outils
class BilanComptes(object):
"""
    Class for creating the accounts report
"""
@staticmethod
def bilan(dossier_destination, subedition, subgeneraux, lignes):
"""
        creation of the report
        :param dossier_destination: an instance of the dossier.DossierDestination class
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param lignes: data rows of the report
"""
nom = 'bilan-subsides-comptes_' + str(subedition.annee_fin_general
) + '_' + Outils.mois_string(subedition.mois_fin_general) + '.csv'
with dossier_destination.writer(nom) as fichier_writer:
ligne = ['année', 'mois', 'code client', 'code client sap',
'abrév. labo', 'nom labo', 'type client', 'nature client',
'id-compte', 'numéro compte', 'intitulé compte',
'code type compte', 'code type subside', 'Subsides MAj',
'Subsides MOj']
for categorie in subgeneraux.codes_d3():
ligne.append('Subsides ' + categorie + 'j')
ligne += ['total Subsides']
fichier_writer.writerow(ligne)
for ligne in lignes:
fichier_writer.writerow(ligne)
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
        generate the report's data rows
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidation class for the report data
        :return: data rows of the report
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda
x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.
mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'],
client['nature'], id_compte, num_compte, compte[
'intitule'], compte['type'], compte['t3'], Outils.
format_2_dec(compte['s-mat']), Outils.format_2_dec(
compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' +
categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
|
from outils import Outils
class BilanComptes(object):
"""
    Class for creating the accounts report
"""
@staticmethod
def bilan(dossier_destination, subedition, subgeneraux, lignes):
"""
        creation of the report
        :param dossier_destination: an instance of the dossier.DossierDestination class
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param lignes: data rows of the report
"""
nom = "bilan-subsides-comptes_" + str(subedition.annee_fin_general) + "_" + \
Outils.mois_string(subedition.mois_fin_general) + ".csv"
with dossier_destination.writer(nom) as fichier_writer:
ligne = ["année", "mois", "code client", "code client sap", "abrév. labo", "nom labo", "type client",
"nature client", "id-compte", "numéro compte", "intitulé compte", "code type compte",
"code type subside", "Subsides MAj", "Subsides MOj"]
for categorie in subgeneraux.codes_d3():
ligne.append("Subsides " + categorie + "j")
ligne += ["total Subsides"]
fichier_writer.writerow(ligne)
for ligne in lignes:
fichier_writer.writerow(ligne)
@staticmethod
def creation_lignes(subedition, subgeneraux, consolidation):
"""
        generate the report's data rows
        :param subedition: edition parameters
        :param subgeneraux: general parameters
        :param consolidation: consolidation class for the report data
        :return: data rows of the report
"""
lignes = []
for code_client, client in sorted(consolidation.clients.items()):
numbers = {}
for id_compte, compte in client['comptes'].items():
numbers[id_compte] = compte['num_compte']
for id_compte, num_compte in sorted(numbers.items(), key=lambda x: x[1]):
compte = client['comptes'][id_compte]
if compte['subs'] > 0:
ligne = [subedition.annee_fin_general, subedition.mois_fin_general, code_client, client['sap'],
client['abrev'], client['nom'], client['type'], client['nature'], id_compte,
num_compte, compte['intitule'], compte['type'], compte['t3'],
Outils.format_2_dec(compte['s-mat']), Outils.format_2_dec(compte['s-mot'])]
for categorie in subgeneraux.codes_d3():
ligne.append(Outils.format_2_dec(compte['s-' + categorie + 't']))
ligne += [Outils.format_2_dec(compte['subs'])]
lignes.append(ligne)
return lignes
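# --- Illustrative harness (reviewer sketch, not part of the original module) ---
# The real dossier.DossierDestination and the subedition/subgeneraux parameter
# objects live elsewhere in this project; the stand-ins below mimic only the
# attributes and methods that bilan() actually touches, and the run still
# assumes the project's outils module (Outils.mois_string) is importable.
import csv
import io
from contextlib import contextmanager

class _FakeDestination:
    """Collects CSV output in memory instead of writing files to disk."""
    def __init__(self):
        self.files = {}

    @contextmanager
    def writer(self, nom):
        buffer = io.StringIO()
        self.files[nom] = buffer
        yield csv.writer(buffer)

class _FakeEdition:
    annee_fin_general = 2021
    mois_fin_general = 3

class _FakeGeneraux:
    @staticmethod
    def codes_d3():
        return ['C1', 'C2']  # placeholder subsidy category codes

destination = _FakeDestination()
BilanComptes.bilan(destination, _FakeEdition(), _FakeGeneraux(), lignes=[])
print(list(destination.files))  # one CSV, named from the year and month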
|
[
2,
3,
4,
5,
6
] |
2,196 |
9e3f4484542c2629d636fcb4166584ba52bebe21
|
<mask token>
|
<mask token>
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_sat)
sentinela = SentinelSat(carpeta)
sentinela.start_Monitoring()
|
from LibTools.filesystem import Carpeta
from slaves import SentinelSat
import settings
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_sat)
sentinela = SentinelSat(carpeta)
sentinela.start_Monitoring()
|
# -*- coding: utf-8 -*-
from LibTools.filesystem import Carpeta
from slaves import SentinelSat
import settings
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_sat)
sentinela = SentinelSat(carpeta)
sentinela.start_Monitoring()
| null |
[
0,
1,
2,
3
] |
2,197 |
1d0730e8fd120e1c4bc5b89cbd766234e1fa3bca
|
<mask token>
def cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):
group_number = 8
year_trade_days = 242
min_stock_number = 100
out_path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
alpha_remove_extreme_value = True
alpha_standard = True
alpha_industry_neutral = True
alpha_barra_style_neutral = True
price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')
alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')
industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')
industry = industry.applymap(lambda x: x.decode('utf-8'))
[alpha_val, industry] = FactorPreProcess().make_same_index_columns([
alpha_val, industry])
if alpha_barra_style_neutral:
size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'
)
beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'
)
nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',
None, 'barra_risk_dfc')
momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,
'barra_risk_dfc')
[size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([
size, beta, nolin_size])
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],
beta.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],
beta.columns[-1])
else:
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])
date_series = Date().get_trade_date_series(beg_date, end_date, period=
cal_period)
date_series = list(set(date_series) & set(alpha_val.columns))
date_series.sort()
if alpha_remove_extreme_value:
alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)
if alpha_standard:
alpha_val = FactorPreProcess().standardization(alpha_val)
alpha_return = pd.DataFrame([], index=date_series)
alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)
for i_date in range(len(date_series) - 2):
cur_cal_date = date_series[i_date]
next_cal_date = date_series[i_date + 1]
buy_date = Date().get_trade_date_offset(cur_cal_date, 1)
sell_date = Date().get_trade_date_offset(next_cal_date, 1)
print(' Calculating Factor %s Alpha Return At %s' % (factor_name,
cur_cal_date))
alpha_return.index.name = 'CalDate'
alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date
alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date
alpha_date = alpha_val[cur_cal_date]
buy_price = price[buy_date]
sell_price = price[sell_date]
pct_date = sell_price / buy_price - 1.0
if alpha_industry_neutral:
try:
industry_date = industry[cur_cal_date]
industry_dummy = pd.get_dummies(industry_date)
except:
continue
if len(pd.concat([alpha_date, industry_date], axis=1).dropna()
) < min_stock_number:
continue
else:
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=industry_dummy)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
if alpha_barra_style_neutral:
try:
size_date = size[cur_cal_date]
beta_date = beta[cur_cal_date]
nolin_size_date = nolin_size[cur_cal_date]
momentum_date = momentum[cur_cal_date]
except:
continue
if len(pd.concat([alpha_date, size_date], axis=1).dropna()
) < min_stock_number:
continue
else:
barra_risk_exposure = pd.concat([beta_date, size_date,
nolin_size_date, momentum_date], axis=1)
barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',
'momentum']
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=barra_risk_exposure)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
alpha_exposure.ix[cur_cal_date, :] = alpha_date
res = pd.concat([alpha_date, pct_date], axis=1)
res.columns = ['alpha_val', 'period_pct']
res = res.dropna()
res = res.sort_values(by=['alpha_val'], ascending=False)
labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))
]
res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=
labels)
period_return = (res['alpha_val'] * res['period_pct']).mean()
alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return
information_correlation = res['alpha_val'].corr(res['period_pct'])
alpha_return.ix[cur_cal_date, 'IC'] = information_correlation
group_pct = res.groupby(by=['group'])['period_pct'].mean()
for i_label in range(len(labels)):
alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[
i_label]
alpha_return = alpha_return.dropna(subset=['FactorReturn'])
alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()
cum_labels = [('Cum_' + str(x)) for x in labels]
alpha_return[cum_labels] = alpha_return[labels].cumsum()
back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)
back_test_end_date = Date().get_trade_date_offset(date_series[len(
date_series) - 1], 1)
back_test_days = Date().get_trade_date_diff(back_test_beg_date,
back_test_end_date)
backtest_year = back_test_days / year_trade_days
alpha_return['year'] = alpha_return.index.map(lambda x: datetime.
strptime(x, '%Y%m%d').year)
year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(
)
year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()
year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()
year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()
year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()
year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,
year_ic_std, year_gp_mean], axis=1)
col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']
col.extend(labels)
year_describe.columns = col
year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'
] / year_describe['Count'] * year_count
year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'
] * np.sqrt(50)
year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[
'CumFactorReturn'].values[-1] / backtest_year
year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(
) / alpha_return['IC'].std() * np.sqrt(50)
year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()
year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()
year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()
year_describe.index = year_describe.index.map(str)
for i in range(len(year_describe)):
year = year_describe.index[i]
corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index
=labels, columns=['group_return'])
corr_pd['group_number'] = list(range(1, group_number + 1))
year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]
alpha_exposure = alpha_exposure.astype(np.float)
filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +
'_FactorExposureNeutral.csv')
alpha_exposure.T.to_csv(filename)
exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[
'Exposure_Corr'])
for i_date in range(1, len(alpha_exposure.index)):
last_exposure_date = alpha_exposure.index[i_date - 1]
cur_exposure_date = alpha_exposure.index[i_date]
exposure_adjoin = alpha_exposure.ix[last_exposure_date:
cur_exposure_date, :]
exposure_adjoin = exposure_adjoin.T.dropna()
exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'
] = exposure_adjoin.corr().ix[0, 1]
exposure_corr = exposure_corr.dropna()
exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'
].mean()
filename = os.path.join(out_path, 'alpha_exposure_stability',
factor_name + '_FactorExposureCorr.csv')
exposure_corr.to_csv(filename)
filename = os.path.join(out_path, 'alpha_return', factor_name +
'_FactorReturn.xlsx')
sheet_name = 'FactorReturn'
we = WriteExcel(filename)
ws = we.add_worksheet(sheet_name)
num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'
we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number
=1, num_format_pd=num_format_pd, color='blue', fillna=True)
num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['year']] = '0'
we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=
2 + len(year_describe.columns), num_format_pd=num_format_pd, color=
'blue', fillna=True)
we.close()
<mask token>
|
<mask token>
def factor_neutral(factor_series, neutral_frame):
"""
    Neutralization: regress the factor on the neutral exposures and keep the residual.
"""
concat_data = pd.concat([factor_series, neutral_frame], axis=1)
concat_data = concat_data.dropna()
factor_val = concat_data.ix[:, 0]
neutral_val = concat_data.ix[:, 1:]
model = sm.OLS(factor_val.values, neutral_val.values)
regress = model.fit()
params = regress.params
params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])
factor_res = factor_val - regress.predict(neutral_val)
return params, factor_res
def cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):
group_number = 8
year_trade_days = 242
min_stock_number = 100
out_path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
alpha_remove_extreme_value = True
alpha_standard = True
alpha_industry_neutral = True
alpha_barra_style_neutral = True
price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')
alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')
industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')
industry = industry.applymap(lambda x: x.decode('utf-8'))
[alpha_val, industry] = FactorPreProcess().make_same_index_columns([
alpha_val, industry])
if alpha_barra_style_neutral:
size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'
)
beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'
)
nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',
None, 'barra_risk_dfc')
momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,
'barra_risk_dfc')
[size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([
size, beta, nolin_size])
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],
beta.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],
beta.columns[-1])
else:
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])
date_series = Date().get_trade_date_series(beg_date, end_date, period=
cal_period)
date_series = list(set(date_series) & set(alpha_val.columns))
date_series.sort()
if alpha_remove_extreme_value:
alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)
if alpha_standard:
alpha_val = FactorPreProcess().standardization(alpha_val)
alpha_return = pd.DataFrame([], index=date_series)
alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)
for i_date in range(len(date_series) - 2):
cur_cal_date = date_series[i_date]
next_cal_date = date_series[i_date + 1]
buy_date = Date().get_trade_date_offset(cur_cal_date, 1)
sell_date = Date().get_trade_date_offset(next_cal_date, 1)
print(' Calculating Factor %s Alpha Return At %s' % (factor_name,
cur_cal_date))
alpha_return.index.name = 'CalDate'
alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date
alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date
alpha_date = alpha_val[cur_cal_date]
buy_price = price[buy_date]
sell_price = price[sell_date]
pct_date = sell_price / buy_price - 1.0
if alpha_industry_neutral:
try:
industry_date = industry[cur_cal_date]
industry_dummy = pd.get_dummies(industry_date)
except:
continue
if len(pd.concat([alpha_date, industry_date], axis=1).dropna()
) < min_stock_number:
continue
else:
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=industry_dummy)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
if alpha_barra_style_neutral:
try:
size_date = size[cur_cal_date]
beta_date = beta[cur_cal_date]
nolin_size_date = nolin_size[cur_cal_date]
momentum_date = momentum[cur_cal_date]
except:
continue
if len(pd.concat([alpha_date, size_date], axis=1).dropna()
) < min_stock_number:
continue
else:
barra_risk_exposure = pd.concat([beta_date, size_date,
nolin_size_date, momentum_date], axis=1)
barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',
'momentum']
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=barra_risk_exposure)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
alpha_exposure.ix[cur_cal_date, :] = alpha_date
res = pd.concat([alpha_date, pct_date], axis=1)
res.columns = ['alpha_val', 'period_pct']
res = res.dropna()
res = res.sort_values(by=['alpha_val'], ascending=False)
labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))
]
res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=
labels)
period_return = (res['alpha_val'] * res['period_pct']).mean()
alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return
information_correlation = res['alpha_val'].corr(res['period_pct'])
alpha_return.ix[cur_cal_date, 'IC'] = information_correlation
group_pct = res.groupby(by=['group'])['period_pct'].mean()
for i_label in range(len(labels)):
alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[
i_label]
alpha_return = alpha_return.dropna(subset=['FactorReturn'])
alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()
cum_labels = [('Cum_' + str(x)) for x in labels]
alpha_return[cum_labels] = alpha_return[labels].cumsum()
back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)
back_test_end_date = Date().get_trade_date_offset(date_series[len(
date_series) - 1], 1)
back_test_days = Date().get_trade_date_diff(back_test_beg_date,
back_test_end_date)
backtest_year = back_test_days / year_trade_days
alpha_return['year'] = alpha_return.index.map(lambda x: datetime.
strptime(x, '%Y%m%d').year)
year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(
)
year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()
year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()
year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()
year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()
year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,
year_ic_std, year_gp_mean], axis=1)
col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']
col.extend(labels)
year_describe.columns = col
year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'
] / year_describe['Count'] * year_count
year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'
] * np.sqrt(50)
year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[
'CumFactorReturn'].values[-1] / backtest_year
year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(
) / alpha_return['IC'].std() * np.sqrt(50)
year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()
year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()
year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()
year_describe.index = year_describe.index.map(str)
for i in range(len(year_describe)):
year = year_describe.index[i]
corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index
=labels, columns=['group_return'])
corr_pd['group_number'] = list(range(1, group_number + 1))
year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]
alpha_exposure = alpha_exposure.astype(np.float)
filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +
'_FactorExposureNeutral.csv')
alpha_exposure.T.to_csv(filename)
exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[
'Exposure_Corr'])
for i_date in range(1, len(alpha_exposure.index)):
last_exposure_date = alpha_exposure.index[i_date - 1]
cur_exposure_date = alpha_exposure.index[i_date]
exposure_adjoin = alpha_exposure.ix[last_exposure_date:
cur_exposure_date, :]
exposure_adjoin = exposure_adjoin.T.dropna()
exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'
] = exposure_adjoin.corr().ix[0, 1]
exposure_corr = exposure_corr.dropna()
exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'
].mean()
filename = os.path.join(out_path, 'alpha_exposure_stability',
factor_name + '_FactorExposureCorr.csv')
exposure_corr.to_csv(filename)
filename = os.path.join(out_path, 'alpha_return', factor_name +
'_FactorReturn.xlsx')
sheet_name = 'FactorReturn'
we = WriteExcel(filename)
ws = we.add_worksheet(sheet_name)
num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'
we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number
=1, num_format_pd=num_format_pd, color='blue', fillna=True)
num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['year']] = '0'
we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=
2 + len(year_describe.columns), num_format_pd=num_format_pd, color=
'blue', fillna=True)
we.close()
<mask token>
|
<mask token>
def factor_neutral(factor_series, neutral_frame):
"""
    Neutralization: regress the factor on the neutral exposures and keep the residual.
"""
concat_data = pd.concat([factor_series, neutral_frame], axis=1)
concat_data = concat_data.dropna()
factor_val = concat_data.ix[:, 0]
neutral_val = concat_data.ix[:, 1:]
model = sm.OLS(factor_val.values, neutral_val.values)
regress = model.fit()
params = regress.params
params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])
factor_res = factor_val - regress.predict(neutral_val)
return params, factor_res
def cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):
group_number = 8
year_trade_days = 242
min_stock_number = 100
out_path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
alpha_remove_extreme_value = True
alpha_standard = True
alpha_industry_neutral = True
alpha_barra_style_neutral = True
price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')
alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')
industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')
industry = industry.applymap(lambda x: x.decode('utf-8'))
[alpha_val, industry] = FactorPreProcess().make_same_index_columns([
alpha_val, industry])
if alpha_barra_style_neutral:
size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'
)
beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'
)
nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',
None, 'barra_risk_dfc')
momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,
'barra_risk_dfc')
[size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([
size, beta, nolin_size])
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],
beta.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],
beta.columns[-1])
else:
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])
date_series = Date().get_trade_date_series(beg_date, end_date, period=
cal_period)
date_series = list(set(date_series) & set(alpha_val.columns))
date_series.sort()
if alpha_remove_extreme_value:
alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)
if alpha_standard:
alpha_val = FactorPreProcess().standardization(alpha_val)
alpha_return = pd.DataFrame([], index=date_series)
alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)
for i_date in range(len(date_series) - 2):
cur_cal_date = date_series[i_date]
next_cal_date = date_series[i_date + 1]
buy_date = Date().get_trade_date_offset(cur_cal_date, 1)
sell_date = Date().get_trade_date_offset(next_cal_date, 1)
print(' Calculating Factor %s Alpha Return At %s' % (factor_name,
cur_cal_date))
alpha_return.index.name = 'CalDate'
alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date
alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date
alpha_date = alpha_val[cur_cal_date]
buy_price = price[buy_date]
sell_price = price[sell_date]
pct_date = sell_price / buy_price - 1.0
if alpha_industry_neutral:
try:
industry_date = industry[cur_cal_date]
industry_dummy = pd.get_dummies(industry_date)
except:
continue
if len(pd.concat([alpha_date, industry_date], axis=1).dropna()
) < min_stock_number:
continue
else:
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=industry_dummy)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
if alpha_barra_style_neutral:
try:
size_date = size[cur_cal_date]
beta_date = beta[cur_cal_date]
nolin_size_date = nolin_size[cur_cal_date]
momentum_date = momentum[cur_cal_date]
except:
continue
if len(pd.concat([alpha_date, size_date], axis=1).dropna()
) < min_stock_number:
continue
else:
barra_risk_exposure = pd.concat([beta_date, size_date,
nolin_size_date, momentum_date], axis=1)
barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',
'momentum']
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=barra_risk_exposure)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
alpha_exposure.ix[cur_cal_date, :] = alpha_date
res = pd.concat([alpha_date, pct_date], axis=1)
res.columns = ['alpha_val', 'period_pct']
res = res.dropna()
res = res.sort_values(by=['alpha_val'], ascending=False)
labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))
]
res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=
labels)
period_return = (res['alpha_val'] * res['period_pct']).mean()
alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return
information_correlation = res['alpha_val'].corr(res['period_pct'])
alpha_return.ix[cur_cal_date, 'IC'] = information_correlation
group_pct = res.groupby(by=['group'])['period_pct'].mean()
for i_label in range(len(labels)):
alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[
i_label]
alpha_return = alpha_return.dropna(subset=['FactorReturn'])
alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()
cum_labels = [('Cum_' + str(x)) for x in labels]
alpha_return[cum_labels] = alpha_return[labels].cumsum()
back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)
back_test_end_date = Date().get_trade_date_offset(date_series[len(
date_series) - 1], 1)
back_test_days = Date().get_trade_date_diff(back_test_beg_date,
back_test_end_date)
backtest_year = back_test_days / year_trade_days
alpha_return['year'] = alpha_return.index.map(lambda x: datetime.
strptime(x, '%Y%m%d').year)
year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(
)
year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()
year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()
year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()
year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()
year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,
year_ic_std, year_gp_mean], axis=1)
col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']
col.extend(labels)
year_describe.columns = col
year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'
] / year_describe['Count'] * year_count
year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'
] * np.sqrt(50)
year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[
'CumFactorReturn'].values[-1] / backtest_year
year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(
) / alpha_return['IC'].std() * np.sqrt(50)
year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()
year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()
year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()
year_describe.index = year_describe.index.map(str)
for i in range(len(year_describe)):
year = year_describe.index[i]
corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index
=labels, columns=['group_return'])
corr_pd['group_number'] = list(range(1, group_number + 1))
year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]
alpha_exposure = alpha_exposure.astype(np.float)
filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +
'_FactorExposureNeutral.csv')
alpha_exposure.T.to_csv(filename)
exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[
'Exposure_Corr'])
for i_date in range(1, len(alpha_exposure.index)):
last_exposure_date = alpha_exposure.index[i_date - 1]
cur_exposure_date = alpha_exposure.index[i_date]
exposure_adjoin = alpha_exposure.ix[last_exposure_date:
cur_exposure_date, :]
exposure_adjoin = exposure_adjoin.T.dropna()
exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'
] = exposure_adjoin.corr().ix[0, 1]
exposure_corr = exposure_corr.dropna()
exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'
].mean()
filename = os.path.join(out_path, 'alpha_exposure_stability',
factor_name + '_FactorExposureCorr.csv')
exposure_corr.to_csv(filename)
filename = os.path.join(out_path, 'alpha_return', factor_name +
'_FactorReturn.xlsx')
sheet_name = 'FactorReturn'
we = WriteExcel(filename)
ws = we.add_worksheet(sheet_name)
num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'
we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number
=1, num_format_pd=num_format_pd, color='blue', fillna=True)
num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['year']] = '0'
we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=
2 + len(year_describe.columns), num_format_pd=num_format_pd, color=
'blue', fillna=True)
we.close()
if __name__ == '__main__':
cal_period = 'W'
beg_date = '20040101'
end_date = datetime.today().strftime('%Y%m%d')
path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
file = 'MyAlpha.xlsx'
data = pd.read_excel(os.path.join(path, file), encoding='gbk')
data = data[data['计算因子收益率'] == '是']
data = data.reset_index(drop=True)
for i in range(0, len(data)):
factor_name = data.ix[i, '因子名']
        print('#################### Start computing factor return for %s ####################' %
            factor_name)
        cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)
        print('#################### Finished computing factor return for %s ####################' %
            factor_name)
|
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from datetime import datetime
import statsmodels.api as sm
from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.utility_fun.factor_preprocess import FactorPreProcess
from quant.utility_fun.write_excel import WriteExcel
def factor_neutral(factor_series, neutral_frame):
"""
    Neutralization: regress the factor on the neutral exposures and keep the residual.
"""
concat_data = pd.concat([factor_series, neutral_frame], axis=1)
concat_data = concat_data.dropna()
factor_val = concat_data.ix[:, 0]
neutral_val = concat_data.ix[:, 1:]
model = sm.OLS(factor_val.values, neutral_val.values)
regress = model.fit()
params = regress.params
params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])
factor_res = factor_val - regress.predict(neutral_val)
return params, factor_res
def cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):
group_number = 8
year_trade_days = 242
min_stock_number = 100
out_path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
alpha_remove_extreme_value = True
alpha_standard = True
alpha_industry_neutral = True
alpha_barra_style_neutral = True
price = Stock().get_factor_h5('PriceCloseAdjust', None, 'alpha_dfc')
alpha_val = Stock().get_factor_h5(factor_name, None, 'alpha_dfc')
industry = Stock().get_factor_h5('industry_citic1', None, 'primary_mfc')
industry = industry.applymap(lambda x: x.decode('utf-8'))
[alpha_val, industry] = FactorPreProcess().make_same_index_columns([
alpha_val, industry])
if alpha_barra_style_neutral:
size = Stock().get_factor_h5('NORMAL_CNE5_SIZE', None, 'barra_risk_dfc'
)
beta = Stock().get_factor_h5('NORMAL_CNE5_BETA', None, 'barra_risk_dfc'
)
nolin_size = Stock().get_factor_h5('NORMAL_CNE5_NON_LINEAR_SIZE',
None, 'barra_risk_dfc')
momentum = Stock().get_factor_h5('NORMAL_CNE5_MOMENTUM', None,
'barra_risk_dfc')
[size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([
size, beta, nolin_size])
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0],
beta.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1],
beta.columns[-1])
else:
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])
date_series = Date().get_trade_date_series(beg_date, end_date, period=
cal_period)
date_series = list(set(date_series) & set(alpha_val.columns))
date_series.sort()
if alpha_remove_extreme_value:
alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)
if alpha_standard:
alpha_val = FactorPreProcess().standardization(alpha_val)
alpha_return = pd.DataFrame([], index=date_series)
alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)
for i_date in range(len(date_series) - 2):
cur_cal_date = date_series[i_date]
next_cal_date = date_series[i_date + 1]
buy_date = Date().get_trade_date_offset(cur_cal_date, 1)
sell_date = Date().get_trade_date_offset(next_cal_date, 1)
print(' Calculating Factor %s Alpha Return At %s' % (factor_name,
cur_cal_date))
alpha_return.index.name = 'CalDate'
alpha_return.ix[cur_cal_date, 'BuyDate'] = buy_date
alpha_return.ix[cur_cal_date, 'SellDate'] = sell_date
alpha_date = alpha_val[cur_cal_date]
buy_price = price[buy_date]
sell_price = price[sell_date]
pct_date = sell_price / buy_price - 1.0
if alpha_industry_neutral:
try:
industry_date = industry[cur_cal_date]
industry_dummy = pd.get_dummies(industry_date)
except:
continue
if len(pd.concat([alpha_date, industry_date], axis=1).dropna()
) < min_stock_number:
continue
else:
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=industry_dummy)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
if alpha_barra_style_neutral:
try:
size_date = size[cur_cal_date]
beta_date = beta[cur_cal_date]
nolin_size_date = nolin_size[cur_cal_date]
momentum_date = momentum[cur_cal_date]
except:
continue
if len(pd.concat([alpha_date, size_date], axis=1).dropna()
) < min_stock_number:
continue
else:
barra_risk_exposure = pd.concat([beta_date, size_date,
nolin_size_date, momentum_date], axis=1)
barra_risk_exposure.columns = ['beta', 'size', 'nolin_size',
'momentum']
params, factor_res = factor_neutral(factor_series=
alpha_date, neutral_frame=barra_risk_exposure)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(
alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
alpha_exposure.ix[cur_cal_date, :] = alpha_date
res = pd.concat([alpha_date, pct_date], axis=1)
res.columns = ['alpha_val', 'period_pct']
res = res.dropna()
res = res.sort_values(by=['alpha_val'], ascending=False)
labels = [('group_' + str(i)) for i in list(range(1, group_number + 1))
]
res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=
labels)
period_return = (res['alpha_val'] * res['period_pct']).mean()
alpha_return.ix[cur_cal_date, 'FactorReturn'] = period_return
information_correlation = res['alpha_val'].corr(res['period_pct'])
alpha_return.ix[cur_cal_date, 'IC'] = information_correlation
group_pct = res.groupby(by=['group'])['period_pct'].mean()
for i_label in range(len(labels)):
alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[
i_label]
alpha_return = alpha_return.dropna(subset=['FactorReturn'])
alpha_return['CumFactorReturn'] = alpha_return['FactorReturn'].cumsum()
cum_labels = [('Cum_' + str(x)) for x in labels]
alpha_return[cum_labels] = alpha_return[labels].cumsum()
back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)
back_test_end_date = Date().get_trade_date_offset(date_series[len(
date_series) - 1], 1)
back_test_days = Date().get_trade_date_diff(back_test_beg_date,
back_test_end_date)
backtest_year = back_test_days / year_trade_days
alpha_return['year'] = alpha_return.index.map(lambda x: datetime.
strptime(x, '%Y%m%d').year)
year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum(
)
year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()
year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()
year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()
year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()
year_describe = pd.concat([year_factor_return, year_count, year_ic_mean,
year_ic_std, year_gp_mean], axis=1)
col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']
col.extend(labels)
year_describe.columns = col
year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'
] / year_describe['Count'] * year_count
year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'
] * np.sqrt(50)
year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return[
'CumFactorReturn'].values[-1] / backtest_year
year_describe.ix['Sum', 'IC_IR'] = alpha_return['IC'].mean(
) / alpha_return['IC'].std() * np.sqrt(50)
year_describe.ix['Sum', 'IC_mean'] = alpha_return['IC'].mean()
year_describe.ix['Sum', 'IC_std'] = alpha_return['IC'].std()
year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()
year_describe.index = year_describe.index.map(str)
for i in range(len(year_describe)):
year = year_describe.index[i]
corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index
=labels, columns=['group_return'])
corr_pd['group_number'] = list(range(1, group_number + 1))
year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]
alpha_exposure = alpha_exposure.astype(np.float)
filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name +
'_FactorExposureNeutral.csv')
alpha_exposure.T.to_csv(filename)
exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=[
'Exposure_Corr'])
for i_date in range(1, len(alpha_exposure.index)):
last_exposure_date = alpha_exposure.index[i_date - 1]
cur_exposure_date = alpha_exposure.index[i_date]
exposure_adjoin = alpha_exposure.ix[last_exposure_date:
cur_exposure_date, :]
exposure_adjoin = exposure_adjoin.T.dropna()
exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'
] = exposure_adjoin.corr().ix[0, 1]
exposure_corr = exposure_corr.dropna()
exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'
].mean()
filename = os.path.join(out_path, 'alpha_exposure_stability',
factor_name + '_FactorExposureCorr.csv')
exposure_corr.to_csv(filename)
filename = os.path.join(out_path, 'alpha_return', factor_name +
'_FactorReturn.xlsx')
sheet_name = 'FactorReturn'
we = WriteExcel(filename)
ws = we.add_worksheet(sheet_name)
num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'
we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number
=1, num_format_pd=num_format_pd, color='blue', fillna=True)
num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=[
'format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['year']] = '0'
we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=
2 + len(year_describe.columns), num_format_pd=num_format_pd, color=
'blue', fillna=True)
we.close()
if __name__ == '__main__':
cal_period = 'W'
beg_date = '20040101'
end_date = datetime.today().strftime('%Y%m%d')
path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
file = 'MyAlpha.xlsx'
data = pd.read_excel(os.path.join(path, file), encoding='gbk')
data = data[data['计算因子收益率'] == '是']
data = data.reset_index(drop=True)
for i in range(0, len(data)):
factor_name = data.ix[i, '因子名']
        print('#################### Start computing factor return for %s ####################' %
            factor_name)
        cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)
        print('#################### Finished computing factor return for %s ####################' %
            factor_name)
|
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
from datetime import datetime
import statsmodels.api as sm
from quant.stock.stock import Stock
from quant.stock.date import Date
from quant.utility_fun.factor_preprocess import FactorPreProcess
from quant.utility_fun.write_excel import WriteExcel
def factor_neutral(factor_series, neutral_frame):
"""
    Neutralization: regress the factor on the neutral exposures and keep the residual.
"""
concat_data = pd.concat([factor_series, neutral_frame], axis=1)
concat_data = concat_data.dropna()
factor_val = concat_data.ix[:, 0]
neutral_val = concat_data.ix[:, 1:]
model = sm.OLS(factor_val.values, neutral_val.values)
regress = model.fit()
params = regress.params
params = pd.DataFrame(params, index=neutral_val.columns, columns=['param'])
factor_res = factor_val - regress.predict(neutral_val)
return params, factor_res
def cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period):
# param
###############################################################################################################
###############################################################################################################
group_number = 8
year_trade_days = 242
min_stock_number = 100
out_path = 'E:\\3_Data\\5_stock_data\\3_alpha_model\\'
    alpha_remove_extreme_value = True   # winsorize extreme alpha values
    alpha_standard = True               # standardize the alpha factor
    alpha_industry_neutral = True       # industry-neutralize the alpha factor
    alpha_barra_style_neutral = True    # Barra style-neutralize the alpha factor
# read data
###############################################################################################################
###############################################################################################################
price = Stock().get_factor_h5("PriceCloseAdjust", None, "alpha_dfc")
alpha_val = Stock().get_factor_h5(factor_name, None, "alpha_dfc")
industry = Stock().get_factor_h5("industry_citic1", None, "primary_mfc")
industry = industry.applymap(lambda x: x.decode('utf-8'))
[alpha_val, industry] = FactorPreProcess().make_same_index_columns([alpha_val, industry])
if alpha_barra_style_neutral:
size = Stock().get_factor_h5("NORMAL_CNE5_SIZE", None, 'barra_risk_dfc')
beta = Stock().get_factor_h5("NORMAL_CNE5_BETA", None, 'barra_risk_dfc')
nolin_size = Stock().get_factor_h5("NORMAL_CNE5_NON_LINEAR_SIZE", None, 'barra_risk_dfc')
momentum = Stock().get_factor_h5("NORMAL_CNE5_MOMENTUM", None, 'barra_risk_dfc')
[size, beta, nolin_size] = FactorPreProcess().make_same_index_columns([size, beta, nolin_size])
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0], beta.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1], beta.columns[-1])
else:
beg_date = max(beg_date, price.columns[0], alpha_val.columns[0])
end_date = min(end_date, price.columns[-1], alpha_val.columns[-1])
date_series = Date().get_trade_date_series(beg_date, end_date, period=cal_period)
date_series = list(set(date_series) & set(alpha_val.columns))
date_series.sort()
# pre process data
###############################################################################################################
###############################################################################################################
if alpha_remove_extreme_value:
alpha_val = FactorPreProcess().remove_extreme_value_mad(alpha_val)
if alpha_standard:
alpha_val = FactorPreProcess().standardization(alpha_val)
# cal everyday
###############################################################################################################
###############################################################################################################
alpha_return = pd.DataFrame([], index=date_series)
alpha_exposure = pd.DataFrame([], index=date_series, columns=price.index)
for i_date in range(len(date_series) - 2):
cur_cal_date = date_series[i_date]
next_cal_date = date_series[i_date + 1]
buy_date = Date().get_trade_date_offset(cur_cal_date, 1)
sell_date = Date().get_trade_date_offset(next_cal_date, 1)
print(" Calculating Factor %s Alpha Return At %s" % (factor_name, cur_cal_date))
alpha_return.index.name = 'CalDate'
alpha_return.ix[cur_cal_date, "BuyDate"] = buy_date
alpha_return.ix[cur_cal_date, "SellDate"] = sell_date
alpha_date = alpha_val[cur_cal_date]
buy_price = price[buy_date]
sell_price = price[sell_date]
pct_date = sell_price / buy_price - 1.0
if alpha_industry_neutral:
try:
industry_date = industry[cur_cal_date]
industry_dummy = pd.get_dummies(industry_date)
except:
continue
if len(pd.concat([alpha_date, industry_date], axis=1).dropna()) < min_stock_number:
continue
else:
params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=industry_dummy)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
if alpha_barra_style_neutral:
try:
size_date = size[cur_cal_date]
beta_date = beta[cur_cal_date]
nolin_size_date = nolin_size[cur_cal_date]
momentum_date = momentum[cur_cal_date]
except:
continue
if len(pd.concat([alpha_date, size_date], axis=1).dropna()) < min_stock_number:
continue
else:
barra_risk_exposure = pd.concat([beta_date, size_date,
nolin_size_date, momentum_date], axis=1)
barra_risk_exposure.columns = ['beta', 'size', 'nolin_size', 'momentum']
params, factor_res = factor_neutral(factor_series=alpha_date, neutral_frame=barra_risk_exposure)
alpha_date = factor_res
alpha_date = FactorPreProcess().remove_extreme_value_mad(alpha_date)
alpha_date = FactorPreProcess().standardization(alpha_date)
alpha_exposure.ix[cur_cal_date, :] = alpha_date
res = pd.concat([alpha_date, pct_date], axis=1)
res.columns = ['alpha_val', 'period_pct']
res = res.dropna()
res = res.sort_values(by=['alpha_val'], ascending=False)
labels = ["group_" + str(i) for i in list(range(1, group_number + 1))]
res['group'] = pd.cut(res['alpha_val'], bins=group_number, labels=labels)
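        # NB: pd.cut splits on equal value ranges; pd.qcut would give
        # equal-count quantile groups, the more common factor-bucket choice.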
period_return = (res['alpha_val'] * res['period_pct']).mean()
alpha_return.ix[cur_cal_date, "FactorReturn"] = period_return
information_correlation = res['alpha_val'].corr(res['period_pct'])
alpha_return.ix[cur_cal_date, "IC"] = information_correlation
group_pct = res.groupby(by=['group'])['period_pct'].mean()
for i_label in range(len(labels)):
alpha_return.ix[cur_cal_date, labels[i_label]] = group_pct.values[i_label]
alpha_return = alpha_return.dropna(subset=['FactorReturn'])
alpha_return["CumFactorReturn"] = alpha_return['FactorReturn'].cumsum()
cum_labels = ["Cum_" + str(x) for x in labels]
alpha_return[cum_labels] = alpha_return[labels].cumsum()
# plot
###############################################################################################################
###############################################################################################################
# plt_col = []
# plt_col.append("CumFactorReturn")
# plt_col.extend(cum_labels)
# alpha_return[plt_col].plot()
# plt.title(factor_name)
# plt.show()
# describe annual
###############################################################################################################
###############################################################################################################
back_test_beg_date = Date().get_trade_date_offset(date_series[0], 1)
back_test_end_date = Date().get_trade_date_offset(date_series[len(date_series) - 1], 1)
back_test_days = Date().get_trade_date_diff(back_test_beg_date, back_test_end_date)
backtest_year = back_test_days / year_trade_days
alpha_return['year'] = alpha_return.index.map(lambda x: datetime.strptime(x, "%Y%m%d").year)
year_factor_return = alpha_return.groupby(by=['year'])['FactorReturn'].sum()
year_count = alpha_return.groupby(by=['year'])['FactorReturn'].count()
year_ic_mean = alpha_return.groupby(by=['year'])['IC'].mean()
year_ic_std = alpha_return.groupby(by=['year'])['IC'].std()
year_gp_mean = alpha_return.groupby(by=['year'])[labels].mean()
year_describe = pd.concat([year_factor_return, year_count, year_ic_mean, year_ic_std, year_gp_mean], axis=1)
col = ['YearFactorReturn', 'Count', 'IC_mean', 'IC_std']
col.extend(labels)
year_describe.columns = col
year_describe['YearFactorReturn'] = year_describe['YearFactorReturn'] / year_describe['Count'] * year_count
year_describe['IC_IR'] = year_describe['IC_mean'] / year_describe['IC_std'] * np.sqrt(50)
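    # np.sqrt(50) annualizes the IC information ratio (~50 weekly periods per
    # year for cal_period="W"); adjust if the rebalancing period changes.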
year_describe.ix['Sum', 'YearFactorReturn'] = alpha_return["CumFactorReturn"].values[-1] / backtest_year
year_describe.ix['Sum', 'IC_IR'] = alpha_return["IC"].mean() / alpha_return["IC"].std() * np.sqrt(50)
year_describe.ix['Sum', 'IC_mean'] = alpha_return["IC"].mean()
year_describe.ix['Sum', 'IC_std'] = alpha_return["IC"].std()
year_describe.ix['Sum', labels] = year_describe.ix[0:-1, labels].sum()
year_describe.index = year_describe.index.map(str)
for i in range(len(year_describe)):
year = year_describe.index[i]
corr_pd = pd.DataFrame(year_describe.ix[year, labels].values, index=labels, columns=['group_return'])
corr_pd['group_number'] = (list(range(1, group_number+1)))
year_describe.ix[year, 'Group_Corr'] = corr_pd.corr().ix[0, 1]
# save data
###############################################################################################################
###############################################################################################################
# alpha_exposure_neutral
###############################################################################################################
alpha_exposure = alpha_exposure.astype(np.float)
filename = os.path.join(out_path, 'alpha_exposure_neutral', factor_name + "_FactorExposureNeutral.csv")
alpha_exposure.T.to_csv(filename)
# exposure_corr
###############################################################################################################
exposure_corr = pd.DataFrame([], index=alpha_exposure.index, columns=['Exposure_Corr'])
for i_date in range(1, len(alpha_exposure.index)):
last_exposure_date = alpha_exposure.index[i_date-1]
cur_exposure_date = alpha_exposure.index[i_date]
exposure_adjoin = alpha_exposure.ix[last_exposure_date:cur_exposure_date, :]
exposure_adjoin = exposure_adjoin.T.dropna()
exposure_corr.ix[cur_exposure_date, 'Exposure_Corr'] = exposure_adjoin.corr().ix[0, 1]
exposure_corr = exposure_corr.dropna()
exposure_corr.ix['Mean', 'Exposure_Corr'] = exposure_corr['Exposure_Corr'].mean()
filename = os.path.join(out_path, 'alpha_exposure_stability', factor_name + "_FactorExposureCorr.csv")
exposure_corr.to_csv(filename)
# Factor Return
###############################################################################################################
filename = os.path.join(out_path, 'alpha_return', factor_name + "_FactorReturn.xlsx")
sheet_name = "FactorReturn"
we = WriteExcel(filename)
ws = we.add_worksheet(sheet_name)
num_format_pd = pd.DataFrame([], columns=year_describe.columns, index=['format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['Count', 'IC_IR']] = '0.00'
we.write_pandas(year_describe, ws, begin_row_number=0, begin_col_number=1,
num_format_pd=num_format_pd, color="blue", fillna=True)
num_format_pd = pd.DataFrame([], columns=alpha_return.columns, index=['format'])
num_format_pd.ix['format', :] = '0.00%'
num_format_pd.ix['format', ['year']] = '0'
we.write_pandas(alpha_return, ws, begin_row_number=0, begin_col_number=2+len(year_describe.columns),
num_format_pd=num_format_pd, color="blue", fillna=True)
we.close()
###############################################################################################################
if __name__ == '__main__':
cal_period = "W"
beg_date = "20040101"
end_date = datetime.today().strftime("%Y%m%d")
path = "E:\\3_Data\\5_stock_data\\3_alpha_model\\"
file = "MyAlpha.xlsx"
data = pd.read_excel(os.path.join(path, file), encoding='gbk')
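    # Excel columns (kept as-is): '计算因子收益率' = "compute factor return?",
    # filtered to rows flagged '是' ("yes"); '因子名' = "factor name".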
data = data[data['计算因子收益率'] == "是"]
data = data.reset_index(drop=True)
for i in range(0, len(data)):
factor_name = data.ix[i, "因子名"]
print("#################### 开始计算因子收益率 %s 数据 ####################" % factor_name)
cal_factor_alpha_return(factor_name, beg_date, end_date, cal_period)
print("#################### 结束计算因子收益率 %s 数据 ####################" % factor_name)
|
[
1,
2,
3,
4,
5
] |
2,198 |
3c88e13e8796c5f39180a9a514f0528a074460a6
|
<mask token>
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
<mask token>
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
<mask token>
|
<mask token>
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
<mask token>
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
"""entering null key to be set, should not work"""
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
def test_4():
"""0 capacity test case"""
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
<mask token>
|
<mask token>
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
def test_1():
"""Basically testing to see if the cache can store and recall info"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
"""entering null key to be set, should not work"""
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
def test_4():
"""0 capacity test case"""
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
if __name__ == '__main__':
test_1()
test_2()
test_3()
test_4()
|
from collections import OrderedDict
class LRU_Cache(object):
def __init__(self, capacity):
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
def test_1():
"""Basically testing to see if the cache can store and recall info"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')
def test_2():
"""testing to see if the least used object gets removed"""
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
"""entering null key to be set, should not work"""
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
def test_4():
"""0 capacity test case"""
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(
f'Current Cache state: {our_cache} expected result is for it to be empty'
)
if __name__ == '__main__':
test_1()
test_2()
test_3()
test_4()
|
from collections import OrderedDict
class LRU_Cache(object):
def __init__(self, capacity):
# Initialize class variables
self.size = capacity
self.jar = OrderedDict()
pass
def get(self, key):
# Retrieve item from provided key. Return -1 if nonexistent.
if key not in self.jar:
return -1
else:
rtn = self.jar.get(key)
self.jar.move_to_end(key)
return rtn
def set(self, key, value):
# Set the value if the key is not present in the cache. If the cache is at capacity remove the oldest item.
if key is None:
return
if len(self.jar) == self.size:
self.jar.popitem(last=False)
self.jar[key] = value
else:
self.jar[key] = value
return
def __str__(self):
return f'{self.jar}'
def test_1():
'''Basically testing to see if the cache can store and recall info'''
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print(f'Cache get 1 returns -> {our_cache.get(1)} | expected result = 1')
def test_2():
'''testing to see if the least used object gets removed'''
our_cache = LRU_Cache(5)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
our_cache.set(5, 5)
our_cache.get(1)
our_cache.set(6, 6)
print(f'Cache get 2 returns -> {our_cache.get(2)} | expected result = -1')
def test_3():
'''entering null key to be set, should not work'''
our_cache = LRU_Cache(5)
[our_cache.set(None, 1) for _ in range(5)]
print(f'Current Cache state: {our_cache} expected result is for it to be empty')
def test_4():
'''0 capacity test case'''
our_cache = LRU_Cache(0)
[our_cache.set(None, 1) for _ in range(5)]
print(f'Current Cache state: {our_cache} expected result is for it to be empty')
if __name__ == "__main__":
test_1()
test_2()
test_3()
test_4()
|
[
6,
8,
10,
11,
12
] |
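Two edge cases are worth flagging in the set() method recorded above: when the cache is full, updating a key that is already present still evicts the oldest entry, and a cache built with capacity 0 will call popitem() on an empty OrderedDict (raising KeyError) for any non-None key. A minimal hardened sketch (hypothetical class name, same OrderedDict idea):

from collections import OrderedDict

class SafeLRUCache:
    def __init__(self, capacity):
        self.size = capacity
        self.jar = OrderedDict()

    def set(self, key, value):
        if key is None or self.size == 0:  # a 0-capacity cache stores nothing
            return
        if key in self.jar:
            self.jar.move_to_end(key)      # an update counts as a use
        elif len(self.jar) >= self.size:
            self.jar.popitem(last=False)   # evict the least-recently-used item
        self.jar[key] = value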
2,199 |
e12c397ca1ae91ce314cda5fe2cd8e0ec4cfa861
|
<mask token>
class PrivateFile2(models.Model):
<mask token>
<mask token>
|
<mask token>
class PrivateFile(models.Model):
<mask token>
<mask token>
class PrivateFile2(models.Model):
title = models.CharField('Title', max_length=200)
file = models.FileField('File')
|
<mask token>
class PrivateFile(models.Model):
title = models.CharField('Title', max_length=200)
file = PrivateFileField('File')
class PrivateFile2(models.Model):
title = models.CharField('Title', max_length=200)
file = models.FileField('File')
|
from django.db import models
from private_storage.fields import PrivateFileField
class PrivateFile(models.Model):
title = models.CharField('Title', max_length=200)
file = PrivateFileField('File')
class PrivateFile2(models.Model):
title = models.CharField('Title', max_length=200)
file = models.FileField('File')
|
from django.db import models
from private_storage.fields import PrivateFileField
class PrivateFile(models.Model):
title = models.CharField("Title", max_length=200)
file = PrivateFileField("File")
class PrivateFile2(models.Model):
title = models.CharField("Title", max_length=200)
file = models.FileField("File")
|
[
1,
3,
4,
5,
6
] |
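For context on PrivateFileField in the entry above: it comes from the django-private-storage package, which additionally needs settings and URL wiring. A hedged sketch following that package's README (verify the exact names against your installed version):

# settings.py (paths and the auth policy are illustrative)
INSTALLED_APPS += ["private_storage"]
PRIVATE_STORAGE_ROOT = "/path/to/private-media/"
PRIVATE_STORAGE_AUTH_FUNCTION = "private_storage.permissions.allow_staff"

# urls.py
from django.urls import include, path
import private_storage.urls

urlpatterns += [
    path("private-media/", include(private_storage.urls)),
]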